From 24e839c04c39d1f4423b267c371e8e5b5bc33867 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 12 Feb 2023 11:03:37 +0100 Subject: Adding upstream version 11.0.1. Signed-off-by: Daniel Baumann --- .github/workflows/python-package.yml | 14 +- CHANGELOG.md | 33 + Makefile | 4 +- docs/CNAME | 1 + docs/index.html | 7 + docs/search.js | 46 + docs/sqlglot.html | 1226 + docs/sqlglot/dataframe.html | 506 + docs/sqlglot/dataframe/sql.html | 4953 +++ docs/sqlglot/dialects.html | 400 + docs/sqlglot/dialects/bigquery.html | 1434 + docs/sqlglot/dialects/clickhouse.html | 1077 + docs/sqlglot/dialects/databricks.html | 704 + docs/sqlglot/dialects/dialect.html | 2134 + docs/sqlglot/dialects/drill.html | 1088 + docs/sqlglot/dialects/duckdb.html | 1028 + docs/sqlglot/dialects/hive.html | 1461 + docs/sqlglot/dialects/mysql.html | 2149 + docs/sqlglot/dialects/oracle.html | 1052 + docs/sqlglot/dialects/postgres.html | 1245 + docs/sqlglot/dialects/presto.html | 1255 + docs/sqlglot/dialects/redshift.html | 1175 + docs/sqlglot/dialects/snowflake.html | 1528 + docs/sqlglot/dialects/spark.html | 1136 + docs/sqlglot/dialects/sqlite.html | 918 + docs/sqlglot/dialects/starrocks.html | 658 + docs/sqlglot/dialects/tableau.html | 704 + docs/sqlglot/dialects/teradata.html | 960 + docs/sqlglot/dialects/trino.html | 653 + docs/sqlglot/dialects/tsql.html | 1772 + docs/sqlglot/diff.html | 1560 + docs/sqlglot/errors.html | 877 + docs/sqlglot/executor.html | 694 + docs/sqlglot/executor/context.html | 715 + docs/sqlglot/executor/env.html | 717 + docs/sqlglot/executor/python.html | 2130 + docs/sqlglot/executor/table.html | 802 + docs/sqlglot/expressions.html | 39484 +++++++++++++++++++ docs/sqlglot/generator.html | 9855 +++++ docs/sqlglot/helper.html | 1651 + docs/sqlglot/lineage.html | 931 + docs/sqlglot/optimizer.html | 264 + docs/sqlglot/optimizer/annotate_types.html | 1179 + docs/sqlglot/optimizer/canonicalize.html | 445 + docs/sqlglot/optimizer/eliminate_ctes.html | 371 + 
docs/sqlglot/optimizer/eliminate_joins.html | 610 + docs/sqlglot/optimizer/eliminate_subqueries.html | 582 + docs/sqlglot/optimizer/expand_laterals.html | 353 + .../optimizer/expand_multi_table_selects.html | 321 + docs/sqlglot/optimizer/isolate_table_selects.html | 317 + docs/sqlglot/optimizer/lower_identities.html | 430 + docs/sqlglot/optimizer/merge_subqueries.html | 794 + docs/sqlglot/optimizer/normalize.html | 585 + docs/sqlglot/optimizer/optimize_joins.html | 489 + docs/sqlglot/optimizer/optimizer.html | 401 + docs/sqlglot/optimizer/pushdown_predicates.html | 773 + docs/sqlglot/optimizer/pushdown_projections.html | 477 + docs/sqlglot/optimizer/qualify_columns.html | 804 + docs/sqlglot/optimizer/qualify_tables.html | 427 + docs/sqlglot/optimizer/scope.html | 2512 ++ docs/sqlglot/optimizer/simplify.html | 1428 + docs/sqlglot/optimizer/unnest_subqueries.html | 835 + docs/sqlglot/parser.html | 8049 ++++ docs/sqlglot/planner.html | 1995 + docs/sqlglot/schema.html | 1624 + docs/sqlglot/serde.html | 408 + docs/sqlglot/time.html | 385 + docs/sqlglot/tokens.html | 6712 ++++ docs/sqlglot/transforms.html | 667 + docs/sqlglot/trie.html | 479 + pdoc/cli.py | 4 +- sqlglot/__init__.py | 3 +- sqlglot/dialects/bigquery.py | 54 +- sqlglot/dialects/clickhouse.py | 2 +- sqlglot/dialects/dialect.py | 124 +- sqlglot/dialects/drill.py | 33 +- sqlglot/dialects/duckdb.py | 8 +- sqlglot/dialects/hive.py | 2 +- sqlglot/dialects/mysql.py | 7 +- sqlglot/dialects/postgres.py | 3 +- sqlglot/dialects/redshift.py | 3 +- sqlglot/dialects/snowflake.py | 13 +- sqlglot/dialects/spark.py | 1 + sqlglot/dialects/sqlite.py | 1 - sqlglot/diff.py | 1 + sqlglot/errors.py | 15 +- sqlglot/executor/__init__.py | 1 + sqlglot/executor/python.py | 2 +- sqlglot/expressions.py | 107 +- sqlglot/generator.py | 54 +- sqlglot/lineage.py | 3 +- sqlglot/optimizer/annotate_types.py | 17 +- sqlglot/optimizer/expand_laterals.py | 34 + sqlglot/optimizer/optimizer.py | 5 +- sqlglot/optimizer/pushdown_projections.py | 6 
+- sqlglot/optimizer/qualify_columns.py | 30 +- sqlglot/optimizer/qualify_tables.py | 13 +- sqlglot/optimizer/scope.py | 20 +- sqlglot/parser.py | 48 +- sqlglot/tokens.py | 38 +- tests/dialects/test_databricks.py | 3 + tests/dialects/test_dialect.py | 8 +- tests/dialects/test_duckdb.py | 4 + tests/dialects/test_hive.py | 2 +- tests/dialects/test_snowflake.py | 48 +- tests/fixtures/identity.sql | 12 +- tests/fixtures/optimizer/expand_laterals.sql | 40 + tests/fixtures/optimizer/optimizer.sql | 23 +- tests/fixtures/optimizer/qualify_columns.sql | 57 +- tests/fixtures/pretty.sql | 5 + tests/test_expressions.py | 4 + tests/test_optimizer.py | 19 +- 112 files changed, 126100 insertions(+), 230 deletions(-) create mode 100644 docs/CNAME create mode 100644 docs/index.html create mode 100644 docs/search.js create mode 100644 docs/sqlglot.html create mode 100644 docs/sqlglot/dataframe.html create mode 100644 docs/sqlglot/dataframe/sql.html create mode 100644 docs/sqlglot/dialects.html create mode 100644 docs/sqlglot/dialects/bigquery.html create mode 100644 docs/sqlglot/dialects/clickhouse.html create mode 100644 docs/sqlglot/dialects/databricks.html create mode 100644 docs/sqlglot/dialects/dialect.html create mode 100644 docs/sqlglot/dialects/drill.html create mode 100644 docs/sqlglot/dialects/duckdb.html create mode 100644 docs/sqlglot/dialects/hive.html create mode 100644 docs/sqlglot/dialects/mysql.html create mode 100644 docs/sqlglot/dialects/oracle.html create mode 100644 docs/sqlglot/dialects/postgres.html create mode 100644 docs/sqlglot/dialects/presto.html create mode 100644 docs/sqlglot/dialects/redshift.html create mode 100644 docs/sqlglot/dialects/snowflake.html create mode 100644 docs/sqlglot/dialects/spark.html create mode 100644 docs/sqlglot/dialects/sqlite.html create mode 100644 docs/sqlglot/dialects/starrocks.html create mode 100644 docs/sqlglot/dialects/tableau.html create mode 100644 docs/sqlglot/dialects/teradata.html create mode 100644 
docs/sqlglot/dialects/trino.html create mode 100644 docs/sqlglot/dialects/tsql.html create mode 100644 docs/sqlglot/diff.html create mode 100644 docs/sqlglot/errors.html create mode 100644 docs/sqlglot/executor.html create mode 100644 docs/sqlglot/executor/context.html create mode 100644 docs/sqlglot/executor/env.html create mode 100644 docs/sqlglot/executor/python.html create mode 100644 docs/sqlglot/executor/table.html create mode 100644 docs/sqlglot/expressions.html create mode 100644 docs/sqlglot/generator.html create mode 100644 docs/sqlglot/helper.html create mode 100644 docs/sqlglot/lineage.html create mode 100644 docs/sqlglot/optimizer.html create mode 100644 docs/sqlglot/optimizer/annotate_types.html create mode 100644 docs/sqlglot/optimizer/canonicalize.html create mode 100644 docs/sqlglot/optimizer/eliminate_ctes.html create mode 100644 docs/sqlglot/optimizer/eliminate_joins.html create mode 100644 docs/sqlglot/optimizer/eliminate_subqueries.html create mode 100644 docs/sqlglot/optimizer/expand_laterals.html create mode 100644 docs/sqlglot/optimizer/expand_multi_table_selects.html create mode 100644 docs/sqlglot/optimizer/isolate_table_selects.html create mode 100644 docs/sqlglot/optimizer/lower_identities.html create mode 100644 docs/sqlglot/optimizer/merge_subqueries.html create mode 100644 docs/sqlglot/optimizer/normalize.html create mode 100644 docs/sqlglot/optimizer/optimize_joins.html create mode 100644 docs/sqlglot/optimizer/optimizer.html create mode 100644 docs/sqlglot/optimizer/pushdown_predicates.html create mode 100644 docs/sqlglot/optimizer/pushdown_projections.html create mode 100644 docs/sqlglot/optimizer/qualify_columns.html create mode 100644 docs/sqlglot/optimizer/qualify_tables.html create mode 100644 docs/sqlglot/optimizer/scope.html create mode 100644 docs/sqlglot/optimizer/simplify.html create mode 100644 docs/sqlglot/optimizer/unnest_subqueries.html create mode 100644 docs/sqlglot/parser.html create mode 100644 
docs/sqlglot/planner.html create mode 100644 docs/sqlglot/schema.html create mode 100644 docs/sqlglot/serde.html create mode 100644 docs/sqlglot/time.html create mode 100644 docs/sqlglot/tokens.html create mode 100644 docs/sqlglot/transforms.html create mode 100644 docs/sqlglot/trie.html create mode 100644 sqlglot/optimizer/expand_laterals.py create mode 100644 tests/fixtures/optimizer/expand_laterals.sql diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 8cd3634..555ac5b 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -22,5 +22,17 @@ jobs: python -m pip install --upgrade pip make install-dev - name: Run checks (linter, code style, tests) + run: make check + - name: Update documentation run: | - make check + make docs + git add docs + git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git commit -m "CI: Auto-generated documentation" -a | exit 0 + if: ${{ matrix.python-version == '3.10' && github.event_name == 'push' }} + - name: Push changes + if: ${{ matrix.python-version == '3.10' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 76c9e96..3b70b85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,39 @@ Changelog ========= +v11.0.0 +------ + +Changes: + +- Breaking: Renamed ESCAPES to STRING_ESCAPES in the Tokenizer class. + +- New: Deployed pdoc documentation page. + +- New: Add support for read locking using the FOR UPDATE/SHARE syntax (e.g. MySQL). + +- New: Added support for CASCADE, SET NULL and SET DEFAULT constraints. + +- New: Added "cast" expression helper. + +- New: Add support for transpiling Postgres GENERATE_SERIES into Presto SEQUENCE. + +- Improvement: Fix tokenizing of identifier escapes. 
+ +- Improvement: Fix eliminate_subqueries [bug](https://github.com/tobymao/sqlglot/commit/b5df65e3fb5ee1ebc3cbab64b6d89598cf47a10b) related to unions. + +- Improvement: IFNULL is now transpiled to COALESCE by default for every dialect. + +- Improvement: Refactored the way properties are handled. Now it's easier to add them and specify their position in a SQL expression. + +- Improvement: Fixed alias quoting bug. + +- Improvement: Fixed CUBE / ROLLUP / GROUPING SETS parsing and generation. + +- Improvement: Fixed get_or_raise Dialect/t.Type[Dialect] argument bug. + +- Improvement: Improved python type hints. + v10.6.0 ------ diff --git a/Makefile b/Makefile index 8f27ecf..134637b 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ style: check: style test docs: - python pdoc/cli.py -o pdoc/docs + python pdoc/cli.py -o docs docs-serve: - python pdoc/cli.py + python pdoc/cli.py --port 8002 diff --git a/docs/CNAME b/docs/CNAME new file mode 100644 index 0000000..bd9a7c8 --- /dev/null +++ b/docs/CNAME @@ -0,0 +1 @@ +sqlglot.com \ No newline at end of file diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..ef64b90 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,7 @@ + + + + + + + diff --git a/docs/search.js b/docs/search.js new file mode 100644 index 0000000..0cb5e81 --- /dev/null +++ b/docs/search.js @@ -0,0 +1,46 @@ +window.pdocSearch = (function(){ +/** elasticlunr - http://weixsong.github.io * Copyright (C) 2017 Oliver Nightingale * Copyright (C) 2017 Wei Song * MIT Licensed */!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 
0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var 
n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return 
this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return 
this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return 
r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();oSQLGlot\n\n

SQLGlot is a no-dependency SQL parser, transpiler, optimizer, and engine. It can be used to format SQL or translate between 19 different dialects like DuckDB, Presto, Spark, Snowflake, and BigQuery. It aims to read a wide variety of SQL inputs and output syntactically correct SQL in the targeted dialects.

\n\n

It is a very comprehensive generic SQL parser with a robust test suite. It is also quite performant, while being written purely in Python.

\n\n

You can easily customize the parser, analyze queries, traverse expression trees, and programmatically build SQL.

\n\n

Syntax errors are highlighted and dialect incompatibilities can warn or raise depending on configurations. However, it should be noted that SQL validation is not SQLGlot\u2019s goal, so some syntax errors may go unnoticed.

\n\n

Contributions are very welcome in SQLGlot; read the contribution guide to get started!

\n\n

Table of Contents

\n\n\n\n

Install

\n\n

From PyPI:

\n\n
pip3 install sqlglot\n
\n\n

Or with a local checkout:

\n\n
make install\n
\n\n

Requirements for development (optional):

\n\n
make install-dev\n
\n\n

Get in Touch

\n\n

We'd love to hear from you. Join our community Slack channel!

\n\n

Examples

\n\n

Formatting and Transpiling

\n\n

Easily translate from one dialect to another. For example, date/time functions vary from dialects and can be hard to deal with:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT EPOCH_MS(1618088028295)", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
'SELECT FROM_UNIXTIME(1618088028295 / 1000)'\n
\n
\n\n

SQLGlot can even translate custom time formats:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT STRFTIME(x, '%y-%-m-%S')", read="duckdb", write="hive")[0]\n
\n
\n\n
\n
"SELECT DATE_FORMAT(x, 'yy-M-ss')"\n
\n
\n\n

As another example, let's suppose that we want to read in a SQL query that contains a CTE and a cast to REAL, and then transpile it to Spark, which uses backticks for identifiers and FLOAT instead of REAL:

\n\n
\n
import sqlglot\n\nsql = """WITH baz AS (SELECT a, c FROM foo WHERE a = 1) SELECT f.a, b.b, baz.c, CAST("b"."a" AS REAL) d FROM foo f JOIN bar b ON f.a = b.a LEFT JOIN baz ON f.a = baz.a"""\nprint(sqlglot.transpile(sql, write="spark", identify=True, pretty=True)[0])\n
\n
\n\n
\n
WITH `baz` AS (\n  SELECT\n    `a`,\n    `c`\n  FROM `foo`\n  WHERE\n    `a` = 1\n)\nSELECT\n  `f`.`a`,\n  `b`.`b`,\n  `baz`.`c`,\n  CAST(`b`.`a` AS FLOAT) AS `d`\nFROM `foo` AS `f`\nJOIN `bar` AS `b`\n  ON `f`.`a` = `b`.`a`\nLEFT JOIN `baz`\n  ON `f`.`a` = `baz`.`a`\n
\n
\n\n

Comments are also preserved in a best-effort basis when transpiling SQL code:

\n\n
\n
sql = """\n/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), # comment 3\n  y               -- comment 4\nFROM\n  bar /* comment 5 */,\n  tbl #          comment 6\n"""\n\nprint(sqlglot.transpile(sql, read='mysql', pretty=True)[0])\n
\n
\n\n
\n
/* multi\n   line\n   comment\n*/\nSELECT\n  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,\n  CAST(x AS INT), /* comment 3 */\n  y /* comment 4 */\nFROM bar /* comment 5 */, tbl /*          comment 6 */\n
\n
\n\n

Metadata

\n\n

You can explore SQL with expression helpers to do things like find columns and tables:

\n\n
\n
from sqlglot import parse_one, exp\n\n# print all column references (a and b)\nfor column in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Column):\n    print(column.alias_or_name)\n\n# find all projections in select statements (a and c)\nfor select in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Select):\n    for projection in select.expressions:\n        print(projection.alias_or_name)\n\n# find all tables (x, y, z)\nfor table in parse_one("SELECT * FROM x JOIN y JOIN z").find_all(exp.Table):\n    print(table.name)\n
\n
\n\n

Parser Errors

\n\n

When the parser detects an error in the syntax, it raises a ParserError:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT foo( FROM bar")\n
\n
\n\n
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 13.\n  select foo( FROM bar\n              ~~~~\n
\n\n

Structured syntax errors are accessible for programmatic use:

\n\n
\n
import sqlglot\ntry:\n    sqlglot.transpile("SELECT foo( FROM bar")\nexcept sqlglot.errors.ParseError as e:\n    print(e.errors)\n
\n
\n\n
\n
[{\n  'description': 'Expecting )',\n  'line': 1,\n  'col': 13,\n  'start_context': 'SELECT foo( ',\n  'highlight': 'FROM',\n  'end_context': ' bar'\n}]\n
\n
\n\n

Unsupported Errors

\n\n

Presto APPROX_DISTINCT supports the accuracy argument which is not supported in Hive:

\n\n
\n
import sqlglot\nsqlglot.transpile("SELECT APPROX_DISTINCT(a, 0.1) FROM foo", read="presto", write="hive")\n
\n
\n\n
\n
APPROX_COUNT_DISTINCT does not support accuracy\n'SELECT APPROX_COUNT_DISTINCT(a) FROM foo'\n
\n
\n\n

Build and Modify SQL

\n\n

SQLGlot supports incrementally building sql expressions:

\n\n
\n
from sqlglot import select, condition\n\nwhere = condition("x=1").and_("y=1")\nselect("*").from_("y").where(where).sql()\n
\n
\n\n
\n
'SELECT * FROM y WHERE x = 1 AND y = 1'\n
\n
\n\n

You can also modify a parsed tree:

\n\n
\n
from sqlglot import parse_one\nparse_one("SELECT x FROM y").from_("z").sql()\n
\n
\n\n
\n
'SELECT x FROM y, z'\n
\n
\n\n

There is also a way to recursively transform the parsed tree by applying a mapping function to each tree node:

\n\n
\n
from sqlglot import exp, parse_one\n\nexpression_tree = parse_one("SELECT a FROM x")\n\ndef transformer(node):\n    if isinstance(node, exp.Column) and node.name == "a":\n        return parse_one("FUN(a)")\n    return node\n\ntransformed_tree = expression_tree.transform(transformer)\ntransformed_tree.sql()\n
\n
\n\n
\n
'SELECT FUN(a) FROM x'\n
\n
\n\n

SQL Optimizer

\n\n

SQLGlot can rewrite queries into an \"optimized\" form. It performs a variety of techniques to create a new canonical AST. This AST can be used to standardize queries or provide the foundations for implementing an actual engine. For example:

\n\n
\n
import sqlglot\nfrom sqlglot.optimizer import optimize\n\nprint(\n    optimize(\n        sqlglot.parse_one("""\n            SELECT A OR (B OR (C AND D))\n            FROM x\n            WHERE Z = date '2021-01-01' + INTERVAL '1' month OR 1 = 0\n        """),\n        schema={"x": {"A": "INT", "B": "INT", "C": "INT", "D": "INT", "Z": "STRING"}}\n    ).sql(pretty=True)\n)\n
\n
\n\n
\n
SELECT\n  (\n    "x"."a" OR "x"."b" OR "x"."c"\n  ) AND (\n    "x"."a" OR "x"."b" OR "x"."d"\n  ) AS "_col_0"\nFROM "x" AS "x"\nWHERE\n  CAST("x"."z" AS DATE) = CAST('2021-02-01' AS DATE)\n
\n
\n\n

AST Introspection

\n\n

You can see the AST version of the sql by calling repr:

\n\n
\n
from sqlglot import parse_one\nprint(repr(parse_one("SELECT a + 1 AS z")))\n
\n
\n\n
\n
(SELECT expressions:\n  (ALIAS this:\n    (ADD this:\n      (COLUMN this:\n        (IDENTIFIER this: a, quoted: False)), expression:\n      (LITERAL this: 1, is_string: False)), alias:\n    (IDENTIFIER this: z, quoted: False)))\n
\n
\n\n

AST Diff

\n\n

SQLGlot can calculate the difference between two expressions and output changes in a form of a sequence of actions needed to transform a source expression into a target one:

\n\n
\n
from sqlglot import diff, parse_one\ndiff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d"))\n
\n
\n\n
\n
[\n  Remove(expression=(ADD this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Insert(expression=(SUB this:\n    (COLUMN this:\n      (IDENTIFIER this: a, quoted: False)), expression:\n    (COLUMN this:\n      (IDENTIFIER this: b, quoted: False)))),\n  Move(expression=(COLUMN this:\n    (IDENTIFIER this: c, quoted: False))),\n  Keep(source=(IDENTIFIER this: b, quoted: False), target=(IDENTIFIER this: b, quoted: False)),\n  ...\n]\n
\n
\n\n

See also: Semantic Diff for SQL.

\n\n

Custom Dialects

\n\n

Dialects can be added by subclassing Dialect:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n\nprint(Dialect["custom"])\n
\n
\n\n
<class '__main__.Custom'>\n
\n\n

SQL Execution

\n\n

One can even interpret SQL queries using SQLGlot, where the tables are represented as Python dictionaries. Although the engine is not very fast (it's not supposed to be) and is in a relatively early stage of development, it can be useful for unit testing and running SQL natively across Python objects. Additionally, the foundation can be easily integrated with fast compute kernels (arrow, pandas). Below is an example showcasing the execution of a SELECT expression that involves aggregations and JOINs:

\n\n
\n
from sqlglot.executor import execute\n\ntables = {\n    "sushi": [\n        {"id": 1, "price": 1.0},\n        {"id": 2, "price": 2.0},\n        {"id": 3, "price": 3.0},\n    ],\n    "order_items": [\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 1, "order_id": 1},\n        {"sushi_id": 2, "order_id": 1},\n        {"sushi_id": 3, "order_id": 2},\n    ],\n    "orders": [\n        {"id": 1, "user_id": 1},\n        {"id": 2, "user_id": 2},\n    ],\n}\n\nexecute(\n    """\n    SELECT\n      o.user_id,\n      SUM(s.price) AS price\n    FROM orders o\n    JOIN order_items i\n      ON o.id = i.order_id\n    JOIN sushi s\n      ON i.sushi_id = s.id\n    GROUP BY o.user_id\n    """,\n    tables=tables\n)\n
\n
\n\n
\n
user_id price\n      1   4.0\n      2   3.0\n
\n
\n\n

See also: Writing a Python SQL engine from scratch.

\n\n

Used By

\n\n\n\n

Documentation

\n\n

SQLGlot uses pdoc to serve its API documentation:

\n\n
make docs-serve\n
\n\n

Run Tests and Lint

\n\n
make check  # Set SKIP_INTEGRATION=1 to skip integration tests\n
\n\n

Benchmarks

\n\n

Benchmarks run on Python 3.10.5 in seconds.

\n\n\n\n\n \n \n \n \n \n \n \n\n\n\n\n \n \n \n \n \n \n \n\n\n \n \n \n \n \n \n \n\n\n \n \n \n \n \n \n \n\n\n \n \n \n \n \n \n \n\n\n
Querysqlglotsqlfluffsqltreesqlparsemoz_sql_parsersqloxide
tpch0.01308 (1.0)1.60626 (122.7)0.01168 (0.893)0.04958 (3.791)0.08543 (6.531)0.00136 (0.104)
short0.00109 (1.0)0.14134 (129.2)0.00099 (0.906)0.00342 (3.131)0.00652 (5.970)8.76E-5 (0.080)
long0.01399 (1.0)2.12632 (151.9)0.01126 (0.805)0.04410 (3.151)0.06671 (4.767)0.00107 (0.076)
crazy0.03969 (1.0)24.3777 (614.1)0.03917 (0.987)11.7043 (294.8)1.03280 (26.02)0.00625 (0.157)
\n\n

Optional Dependencies

\n\n

SQLGlot uses dateutil to simplify literal timedelta expressions. The optimizer will not simplify expressions like the following if the module cannot be found:

\n\n
\n
x + interval '1' month\n
\n
\n\n
\n"}, "sqlglot.pretty": {"fullname": "sqlglot.pretty", "modulename": "sqlglot", "qualname": "pretty", "kind": "variable", "doc": "

Whether to format generated SQL by default.

\n", "default_value": " = False"}, "sqlglot.schema": {"fullname": "sqlglot.schema", "modulename": "sqlglot.schema", "kind": "module", "doc": "

\n"}, "sqlglot.parse": {"fullname": "sqlglot.parse", "modulename": "sqlglot", "qualname": "parse", "kind": "function", "doc": "

Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The resulting syntax tree collection.

\n
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parse_one": {"fullname": "sqlglot.parse_one", "modulename": "sqlglot", "qualname": "parse_one", "kind": "function", "doc": "

Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to parse.
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • into: the SQLGlot Expression to parse into.
  • \n
  • **opts: other sqlglot.parser.Parser options.
  • \n
\n\n
Returns:
\n\n
\n

The syntax tree for the first parsed statement.

\n
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transpile": {"fullname": "sqlglot.transpile", "modulename": "sqlglot", "qualname": "transpile", "kind": "function", "doc": "

Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed\nto conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • sql: the SQL code string to transpile.
  • \n
  • read: the source dialect used to parse the input string (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • write: the target dialect into which the input should be transformed (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • identity: if set to True and if the target dialect is not specified the source dialect will be used as both:\nthe source and the target dialect.
  • \n
  • error_level: the desired error level of the parser.
  • \n
  • **opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The list of transpiled SQL statements.

\n
\n", "signature": "(\tsql: str,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\twrite: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tidentity: bool = True,\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\t**opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe": {"fullname": "sqlglot.dataframe", "modulename": "sqlglot.dataframe", "kind": "module", "doc": "

PySpark DataFrame SQL Generator

\n\n

This is a drop-in replacement for the PySpark DataFrame API that will generate SQL instead of executing DataFrame operations directly. This, when combined with the transpiling support in SQLGlot, allows one to write PySpark DataFrame code and execute it on other engines like DuckDB, Presto, Spark, Snowflake, and BigQuery.

\n\n

Currently many of the common operations are covered and more functionality will be added over time. Please open an issue or PR with your feedback or contribution to help influence what should be prioritized next and make sure your use case is properly supported.

\n\n

How to use

\n\n

Instructions

\n\n
    \n
  • Install SQLGlot and that is all that is required to just generate SQL. The examples show generating SQL and then executing that SQL on a specific engine and that will require that engine's client library.
  • \n
  • Find/replace all from pyspark.sql with from sqlglot.dataframe.
  • \n
  • Prior to any spark.read.table or spark.table run sqlglot.schema.add_table('<table_name>', <column_structure>).\n
      \n
    • The column structure can be defined in the following ways:\n
        \n
      • Dictionary where the keys are column names and values are string of the Spark SQL type name.\n
          \n
        • Ex: {'cola': 'string', 'colb': 'int'}
        • \n
      • \n
      • PySpark DataFrame StructType similar to when using createDataFrame.\n
          \n
        • Ex: StructType([StructField('cola', StringType()), StructField('colb', IntegerType())])
        • \n
      • \n
      • A string of names and types similar to what is supported in createDataFrame.\n
          \n
        • Ex: cola: STRING, colb: INT
        • \n
      • \n
      • [Not Recommended] A list of string column names without type.\n
          \n
        • Ex: ['cola', 'colb']
        • \n
        • The lack of types may limit functionality in future releases.
        • \n
      • \n
    • \n
    • See Registering Custom Schema for information on how to skip this step if the information is stored externally.
    • \n
  • \n
  • Add .sql(pretty=True) to your final DataFrame command to return a list of sql statements to run that command.\n
      \n
    • In most cases a single SQL statement is returned. Currently the only exception is when caching DataFrames which isn't supported in other dialects.
    • \n
    • Spark is the default output dialect. See dialects for a full list of dialects.
    • \n
    • Ex: .sql(pretty=True, dialect='bigquery')
    • \n
  • \n
\n\n

Examples

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\n\nsqlglot.schema.add_table('employee', {\n  'employee_id': 'INT',\n  'fname': 'STRING',\n  'lname': 'STRING',\n  'age': 'INT',\n})  # Register the table structure prior to reading from the table\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))  # Spark will be the dialect used by default\n
\n
\n\n
SELECT\n  `employee`.`age` AS `age`,\n  COUNT(DISTINCT `employee`.`employee_id`) AS `num_employees`\nFROM `employee` AS `employee`\nGROUP BY\n  `employee`.`age`\n
\n\n

Registering Custom Schema Class

\n\n

The step of adding sqlglot.schema.add_table can be skipped if you have the column structure stored externally like in a file or from an external metadata table. This can be done by writing a class that implements the sqlglot.schema.Schema abstract class and then assigning that class to sqlglot.schema.

\n\n
\n
import sqlglot\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import functions as F\nfrom sqlglot.schema import Schema\n\n\nclass ExternalSchema(Schema):\n  ...\n\nsqlglot.schema = ExternalSchema()\n\nspark = SparkSession()\n\ndf = (\n    spark\n    .table('employee')\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) \n)\n\nprint(df.sql(pretty=True))\n
\n
\n\n

Example Implementations

\n\n

Bigquery

\n\n
\n
from google.cloud import bigquery\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\nclient = bigquery.Client()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="bigquery")\n)\n\nresult = None\nfor sql in sql_statements:\n  result = client.query(sql)\n\nassert result is not None\nfor row in client.query(result):\n    print(f"Age: {row['age']}, Num Employees: {row['num_employees']}")\n
\n
\n\n

Snowflake

\n\n
\n
import os\n\nimport snowflake.connector\nfrom sqlglot.dataframe.session import SparkSession\nfrom sqlglot.dataframe import types\nfrom sqlglot.dataframe import functions as F\n\nctx = snowflake.connector.connect(\n    user=os.environ["SNOWFLAKE_USER"],\n    password=os.environ["SNOWFLAKE_PASS"],\n    account=os.environ["SNOWFLAKE_ACCOUNT"]\n)\ncs = ctx.cursor()\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("lname")).alias("num_employees"))\n    .sql(dialect="snowflake")\n)\n\ntry:\n    for sql in sql_statements:\n        cs.execute(sql)\n    results = cs.fetchall()\n    for row in results:\n        print(f"Age: {row[0]}, Num Employees: {row[1]}")\nfinally:\n    cs.close()\nctx.close()\n
\n
\n\n

Spark

\n\n
\n
from pyspark.sql.session import SparkSession as PySparkSession\nfrom sqlglot.dataframe.sql.session import SparkSession\nfrom sqlglot.dataframe.sql import types\nfrom sqlglot.dataframe.sql import functions as F\n\ndata = [\n    (1, "Jack", "Shephard", 34),\n    (2, "John", "Locke", 48),\n    (3, "Kate", "Austen", 34),\n    (4, "Claire", "Littleton", 22),\n    (5, "Hugo", "Reyes", 26),\n]\nschema = types.StructType([\n    types.StructField('employee_id', types.IntegerType(), False),\n    types.StructField('fname', types.StringType(), False),\n    types.StructField('lname', types.StringType(), False),\n    types.StructField('age', types.IntegerType(), False),\n])\n\nsql_statements = (\n    SparkSession()\n    .createDataFrame(data, schema)\n    .groupBy(F.col("age"))\n    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))\n    .sql(dialect="spark")\n)\n\npyspark = PySparkSession.builder.master("local[*]").getOrCreate()\n\ndf = None\nfor sql in sql_statements:\n    df = pyspark.sql(sql)\n\nassert df is not None\ndf.show()\n
\n
\n\n

Unsupportable Operations

\n\n

Any operation that lacks a way to represent it in SQL cannot be supported by this tool. An example of this would be rdd operations. However, since the DataFrame API is mostly modeled around SQL concepts, most operations can be supported.

\n"}, "sqlglot.dataframe.sql": {"fullname": "sqlglot.dataframe.sql", "modulename": "sqlglot.dataframe.sql", "kind": "module", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession": {"fullname": "sqlglot.dataframe.sql.SparkSession", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.SparkSession.__init__": {"fullname": "sqlglot.dataframe.sql.SparkSession.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dataframe.sql.SparkSession.table": {"fullname": "sqlglot.dataframe.sql.SparkSession.table", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"fullname": "sqlglot.dataframe.sql.SparkSession.createDataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.createDataFrame", "kind": "function", "doc": "

\n", "signature": "(\tself,\tdata: Sequence[Union[Dict[str, <MagicMock id='140700333898336'>], List[<MagicMock id='140700333898336'>], Tuple]],\tschema: Optional[<MagicMock id='140700333867312'>] = None,\tsamplingRatio: Optional[float] = None,\tverifySchema: bool = False) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.SparkSession.sql": {"fullname": "sqlglot.dataframe.sql.SparkSession.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "SparkSession.sql", "kind": "function", "doc": "

\n", "signature": "(self, sqlQuery: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame": {"fullname": "sqlglot.dataframe.sql.DataFrame", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrame.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrame.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.__init__", "kind": "function", "doc": "

\n", "signature": "(\tspark: <MagicMock id='140700332957056'>,\texpression: sqlglot.expressions.Select,\tbranch_id: Optional[str] = None,\tsequence_id: Optional[str] = None,\tlast_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>,\tpending_hints: Optional[List[sqlglot.expressions.Expression]] = None,\toutput_expression_container: Optional[<MagicMock id='140700332981504'>] = None,\t**kwargs)"}, "sqlglot.dataframe.sql.DataFrame.sql": {"fullname": "sqlglot.dataframe.sql.DataFrame.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sql", "kind": "function", "doc": "

\n", "signature": "(self, dialect='spark', optimize=True, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.copy": {"fullname": "sqlglot.dataframe.sql.DataFrame.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.select": {"fullname": "sqlglot.dataframe.sql.DataFrame.select", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.select", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.alias": {"fullname": "sqlglot.dataframe.sql.DataFrame.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.where": {"fullname": "sqlglot.dataframe.sql.DataFrame.where", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.where", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.filter": {"fullname": "sqlglot.dataframe.sql.DataFrame.filter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.filter", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolumn: Union[sqlglot.dataframe.sql.column.Column, bool],\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.groupBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.groupBy", "kind": "function", "doc": "

\n", "signature": "(self, *cols, **kwargs) -> sqlglot.dataframe.sql.group.GroupedData:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.agg": {"fullname": "sqlglot.dataframe.sql.DataFrame.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.agg", "kind": "function", "doc": "

\n", "signature": "(self, *exprs, **kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.join": {"fullname": "sqlglot.dataframe.sql.DataFrame.join", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.join", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother_df: sqlglot.dataframe.sql.dataframe.DataFrame,\ton: Union[str, List[str], sqlglot.dataframe.sql.column.Column, List[sqlglot.dataframe.sql.column.Column]],\thow: str = 'inner',\t**kwargs) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"fullname": "sqlglot.dataframe.sql.DataFrame.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.orderBy", "kind": "function", "doc": "

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.sort": {"fullname": "sqlglot.dataframe.sql.DataFrame.sort", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.sort", "kind": "function", "doc": "

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark\nhas irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyway, so this\nis unlikely to come up.

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column],\tascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.union": {"fullname": "sqlglot.dataframe.sql.DataFrame.union", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.union", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"fullname": "sqlglot.dataframe.sql.DataFrame.unionByName", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.unionByName", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame,\tallowMissingColumns: bool = False):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersect": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersect", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersect", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.intersectAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.intersectAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"fullname": "sqlglot.dataframe.sql.DataFrame.exceptAll", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.exceptAll", "kind": "function", "doc": "

\n", "signature": "(\tself,\tother: sqlglot.dataframe.sql.dataframe.DataFrame) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.distinct": {"fullname": "sqlglot.dataframe.sql.DataFrame.distinct", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.distinct", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropDuplicates", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropDuplicates", "kind": "function", "doc": "

\n", "signature": "(self, subset: Optional[List[str]] = None):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.dropna": {"fullname": "sqlglot.dataframe.sql.DataFrame.dropna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.dropna", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.fillna": {"fullname": "sqlglot.dataframe.sql.DataFrame.fillna", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.fillna", "kind": "function", "doc": "

Functionality Difference: If you provide a value to replace a null and that type conflicts\nwith the type of the column then PySpark will just ignore your replacement.\nThis will try to cast them to be the same in some cases. So they won't always match.\nIt is best not to mix types, so make sure the replacement is the same type as the column.

\n\n

Possibility for improvement: Use typeof function to get the type of the column\nand check if it matches the type of the value provided. If not then make it null.

\n", "signature": "(\tself,\tvalue: <MagicMock id='140700331804992'>,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.replace": {"fullname": "sqlglot.dataframe.sql.DataFrame.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[Collection[<MagicMock id='140700331990208'>], <MagicMock id='140700331990208'>, NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumn", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumn", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcolName: str,\tcol: sqlglot.dataframe.sql.column.Column) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"fullname": "sqlglot.dataframe.sql.DataFrame.withColumnRenamed", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.withColumnRenamed", "kind": "function", "doc": "

\n", "signature": "(self, existing: str, new: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.drop": {"fullname": "sqlglot.dataframe.sql.DataFrame.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.limit": {"fullname": "sqlglot.dataframe.sql.DataFrame.limit", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.limit", "kind": "function", "doc": "

\n", "signature": "(self, num: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.hint": {"fullname": "sqlglot.dataframe.sql.DataFrame.hint", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.hint", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\t*parameters: Union[str, int, NoneType]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.repartition": {"fullname": "sqlglot.dataframe.sql.DataFrame.repartition", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.repartition", "kind": "function", "doc": "

\n", "signature": "(\tself,\tnumPartitions: Union[int, <MagicMock id='140700332136032'>],\t*cols: <MagicMock id='140700332245248'>) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"fullname": "sqlglot.dataframe.sql.DataFrame.coalesce", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.coalesce", "kind": "function", "doc": "

\n", "signature": "(self, numPartitions: int) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.cache": {"fullname": "sqlglot.dataframe.sql.DataFrame.cache", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.cache", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrame.persist": {"fullname": "sqlglot.dataframe.sql.DataFrame.persist", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrame.persist", "kind": "function", "doc": "

Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html

\n", "signature": "(\tself,\tstorageLevel: str = 'MEMORY_AND_DISK_SER') -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData": {"fullname": "sqlglot.dataframe.sql.GroupedData", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.GroupedData.__init__": {"fullname": "sqlglot.dataframe.sql.GroupedData.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tgroup_by_cols: List[sqlglot.dataframe.sql.column.Column],\tlast_op: sqlglot.dataframe.sql.operations.Operation)"}, "sqlglot.dataframe.sql.GroupedData.agg": {"fullname": "sqlglot.dataframe.sql.GroupedData.agg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.agg", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*exprs: Union[sqlglot.dataframe.sql.column.Column, Dict[str, str]]) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.count": {"fullname": "sqlglot.dataframe.sql.GroupedData.count", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.count", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.mean": {"fullname": "sqlglot.dataframe.sql.GroupedData.mean", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.mean", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.avg": {"fullname": "sqlglot.dataframe.sql.GroupedData.avg", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.avg", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.max": {"fullname": "sqlglot.dataframe.sql.GroupedData.max", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.max", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.min": {"fullname": "sqlglot.dataframe.sql.GroupedData.min", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.min", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.sum": {"fullname": "sqlglot.dataframe.sql.GroupedData.sum", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.sum", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.GroupedData.pivot": {"fullname": "sqlglot.dataframe.sql.GroupedData.pivot", "modulename": "sqlglot.dataframe.sql", "qualname": "GroupedData.pivot", "kind": "function", "doc": "

\n", "signature": "(self, *cols: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column": {"fullname": "sqlglot.dataframe.sql.Column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Column.__init__": {"fullname": "sqlglot.dataframe.sql.Column.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression: Union[<MagicMock id='140700332259696'>, sqlglot.expressions.Expression, NoneType])"}, "sqlglot.dataframe.sql.Column.ensure_col": {"fullname": "sqlglot.dataframe.sql.Column.ensure_col", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_col", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tvalue: Union[<MagicMock id='140700330611696'>, sqlglot.expressions.Expression, NoneType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_cols": {"fullname": "sqlglot.dataframe.sql.Column.ensure_cols", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_cols", "kind": "function", "doc": "

\n", "signature": "(\tcls,\targs: List[Union[<MagicMock id='140700330840736'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.column.Column]:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"fullname": "sqlglot.dataframe.sql.Column.invoke_anonymous_function", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_anonymous_function", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140700330924096'>],\tfunc_name: str,\t*args: Optional[<MagicMock id='140700330964112'>]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"fullname": "sqlglot.dataframe.sql.Column.invoke_expression_over_column", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.invoke_expression_over_column", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tcolumn: Optional[<MagicMock id='140700331029648'>],\tcallable_expression: Callable,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.binary_op": {"fullname": "sqlglot.dataframe.sql.Column.binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140700331083136'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"fullname": "sqlglot.dataframe.sql.Column.inverse_binary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.inverse_binary_op", "kind": "function", "doc": "

\n", "signature": "(\tself,\tklass: Callable,\tother: <MagicMock id='140700331093216'>,\t**kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.unary_op": {"fullname": "sqlglot.dataframe.sql.Column.unary_op", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.unary_op", "kind": "function", "doc": "

\n", "signature": "(self, klass: Callable, **kwargs) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ensure_literal": {"fullname": "sqlglot.dataframe.sql.Column.ensure_literal", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ensure_literal", "kind": "function", "doc": "

\n", "signature": "(cls, value) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.copy": {"fullname": "sqlglot.dataframe.sql.Column.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.copy", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.set_table_name": {"fullname": "sqlglot.dataframe.sql.Column.set_table_name", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.set_table_name", "kind": "function", "doc": "

\n", "signature": "(self, table_name: str, copy=False) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.sql": {"fullname": "sqlglot.dataframe.sql.Column.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.alias": {"fullname": "sqlglot.dataframe.sql.Column.alias", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.alias", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc": {"fullname": "sqlglot.dataframe.sql.Column.asc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc": {"fullname": "sqlglot.dataframe.sql.Column.desc", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.asc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.asc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_first", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_first", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"fullname": "sqlglot.dataframe.sql.Column.desc_nulls_last", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.desc_nulls_last", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.when": {"fullname": "sqlglot.dataframe.sql.Column.when", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.when", "kind": "function", "doc": "

\n", "signature": "(\tself,\tcondition: sqlglot.dataframe.sql.column.Column,\tvalue: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.otherwise": {"fullname": "sqlglot.dataframe.sql.Column.otherwise", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.otherwise", "kind": "function", "doc": "

\n", "signature": "(self, value: Any) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNull": {"fullname": "sqlglot.dataframe.sql.Column.isNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isNotNull": {"fullname": "sqlglot.dataframe.sql.Column.isNotNull", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isNotNull", "kind": "function", "doc": "

\n", "signature": "(self) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.cast": {"fullname": "sqlglot.dataframe.sql.Column.cast", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.cast", "kind": "function", "doc": "

Functionality Difference: PySpark cast accepts a datatype instance of the datatype class\nSqlglot doesn't currently replicate this class so it only accepts a string

\n", "signature": "(self, dataType: Union[str, sqlglot.dataframe.sql.types.DataType]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.startswith": {"fullname": "sqlglot.dataframe.sql.Column.startswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.startswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.endswith": {"fullname": "sqlglot.dataframe.sql.Column.endswith", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.endswith", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[str, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.rlike": {"fullname": "sqlglot.dataframe.sql.Column.rlike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.rlike", "kind": "function", "doc": "

\n", "signature": "(self, regexp: str) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.like": {"fullname": "sqlglot.dataframe.sql.Column.like", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.like", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.ilike": {"fullname": "sqlglot.dataframe.sql.Column.ilike", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.ilike", "kind": "function", "doc": "

\n", "signature": "(self, other: str):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.substr": {"fullname": "sqlglot.dataframe.sql.Column.substr", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.substr", "kind": "function", "doc": "

\n", "signature": "(\tself,\tstartPos: Union[int, sqlglot.dataframe.sql.column.Column],\tlength: Union[int, sqlglot.dataframe.sql.column.Column]) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.isin": {"fullname": "sqlglot.dataframe.sql.Column.isin", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.isin", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140700331213104'>, Iterable[<MagicMock id='140700331213104'>]]):", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.between": {"fullname": "sqlglot.dataframe.sql.Column.between", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.between", "kind": "function", "doc": "

\n", "signature": "(\tself,\tlowerBound: <MagicMock id='140700331299440'>,\tupperBound: <MagicMock id='140700329240384'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.Column.over": {"fullname": "sqlglot.dataframe.sql.Column.over", "modulename": "sqlglot.dataframe.sql", "qualname": "Column.over", "kind": "function", "doc": "

\n", "signature": "(\tself,\twindow: <MagicMock id='140700329314480'>) -> sqlglot.dataframe.sql.column.Column:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.__init__", "kind": "function", "doc": "

\n", "signature": "(df: sqlglot.dataframe.sql.dataframe.DataFrame)"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.drop", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.drop", "kind": "function", "doc": "

\n", "signature": "(\tself,\thow: str = 'any',\tthresh: Optional[int] = None,\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.fill", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.fill", "kind": "function", "doc": "

\n", "signature": "(\tself,\tvalue: Union[int, bool, float, str, Dict[str, Any]],\tsubset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"fullname": "sqlglot.dataframe.sql.DataFrameNaFunctions.replace", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameNaFunctions.replace", "kind": "function", "doc": "

\n", "signature": "(\tself,\tto_replace: Union[bool, int, float, str, List, Dict],\tvalue: Union[bool, int, float, str, List, NoneType] = None,\tsubset: Union[str, List[str], NoneType] = None) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window": {"fullname": "sqlglot.dataframe.sql.Window", "modulename": "sqlglot.dataframe.sql", "qualname": "Window", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.Window.__init__": {"fullname": "sqlglot.dataframe.sql.Window.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dataframe.sql.Window.partitionBy": {"fullname": "sqlglot.dataframe.sql.Window.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140700329626592'>, List[<MagicMock id='140700329626592'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.orderBy": {"fullname": "sqlglot.dataframe.sql.Window.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tcls,\t*cols: Union[<MagicMock id='140700329828768'>, List[<MagicMock id='140700329828768'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rowsBetween": {"fullname": "sqlglot.dataframe.sql.Window.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.Window.rangeBetween": {"fullname": "sqlglot.dataframe.sql.Window.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "Window.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(cls, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec": {"fullname": "sqlglot.dataframe.sql.WindowSpec", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"fullname": "sqlglot.dataframe.sql.WindowSpec.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression = (WINDOW ))"}, "sqlglot.dataframe.sql.WindowSpec.copy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.copy", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.sql": {"fullname": "sqlglot.dataframe.sql.WindowSpec.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.partitionBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.partitionBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140700329685440'>, List[<MagicMock id='140700329685440'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"fullname": "sqlglot.dataframe.sql.WindowSpec.orderBy", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.orderBy", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*cols: Union[<MagicMock id='140700329654400'>, List[<MagicMock id='140700329654400'>]]) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rowsBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rowsBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"fullname": "sqlglot.dataframe.sql.WindowSpec.rangeBetween", "modulename": "sqlglot.dataframe.sql", "qualname": "WindowSpec.rangeBetween", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> sqlglot.dataframe.sql.window.WindowSpec:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameReader": {"fullname": "sqlglot.dataframe.sql.DataFrameReader", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.__init__", "kind": "function", "doc": "

\n", "signature": "(spark: sqlglot.dataframe.sql.session.SparkSession)"}, "sqlglot.dataframe.sql.DataFrameReader.table": {"fullname": "sqlglot.dataframe.sql.DataFrameReader.table", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameReader.table", "kind": "function", "doc": "

\n", "signature": "(self, tableName: str) -> sqlglot.dataframe.sql.dataframe.DataFrame:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter", "kind": "class", "doc": "

\n"}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.__init__", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.__init__", "kind": "function", "doc": "

\n", "signature": "(\tdf: sqlglot.dataframe.sql.dataframe.DataFrame,\tspark: Optional[sqlglot.dataframe.sql.session.SparkSession] = None,\tmode: Optional[str] = None,\tby_name: bool = False)"}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.copy", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.sql", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.sql", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> List[str]:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.mode", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.mode", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsaveMode: Optional[str]) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.insertInto", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.insertInto", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttableName: str,\toverwrite: Optional[bool] = None) -> sqlglot.dataframe.sql.readwriter.DataFrameWriter:", "funcdef": "def"}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"fullname": "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable", "modulename": "sqlglot.dataframe.sql", "qualname": "DataFrameWriter.saveAsTable", "kind": "function", "doc": "

\n", "signature": "(\tself,\tname: str,\tformat: Optional[str] = None,\tmode: Optional[str] = None):", "funcdef": "def"}, "sqlglot.dialects": {"fullname": "sqlglot.dialects", "modulename": "sqlglot.dialects", "kind": "module", "doc": "

Dialects

\n\n

While there is a SQL standard, most SQL engines support a variation of that standard. This makes it difficult\nto write portable SQL code. SQLGlot bridges all the different variations, called \"dialects\", with an extensible\nSQL transpilation framework.

\n\n

The base sqlglot.dialects.dialect.Dialect class implements a generic dialect that aims to be as universal as possible.

\n\n

Each SQL variation has its own Dialect subclass, extending the corresponding Tokenizer, Parser and Generator\nclasses as needed.

\n\n

Implementing a custom Dialect

\n\n

Consider the following example:

\n\n
\n
from sqlglot import exp\nfrom sqlglot.dialects.dialect import Dialect\nfrom sqlglot.generator import Generator\nfrom sqlglot.tokens import Tokenizer, TokenType\n\n\nclass Custom(Dialect):\n    class Tokenizer(Tokenizer):\n        QUOTES = ["'", '"']\n        IDENTIFIERS = ["`"]\n\n        KEYWORDS = {\n            **Tokenizer.KEYWORDS,\n            "INT64": TokenType.BIGINT,\n            "FLOAT64": TokenType.DOUBLE,\n        }\n\n    class Generator(Generator):\n        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}\n\n        TYPE_MAPPING = {\n            exp.DataType.Type.TINYINT: "INT64",\n            exp.DataType.Type.SMALLINT: "INT64",\n            exp.DataType.Type.INT: "INT64",\n            exp.DataType.Type.BIGINT: "INT64",\n            exp.DataType.Type.DECIMAL: "NUMERIC",\n            exp.DataType.Type.FLOAT: "FLOAT64",\n            exp.DataType.Type.DOUBLE: "FLOAT64",\n            exp.DataType.Type.BOOLEAN: "BOOL",\n            exp.DataType.Type.TEXT: "STRING",\n        }\n
\n
\n\n

This is a typical example of adding a new dialect implementation in SQLGlot: we specify its identifier and string\ndelimiters, as well as what tokens it uses for its types and how they're associated with SQLGlot types. Since\nthe Expression classes are common for each dialect supported in SQLGlot, we may also need to override the generation\nlogic for some expressions; this is usually done by adding new entries to the TRANSFORMS mapping.

\n\n
\n"}, "sqlglot.dialects.bigquery": {"fullname": "sqlglot.dialects.bigquery", "modulename": "sqlglot.dialects.bigquery", "kind": "module", "doc": "

Supports BigQuery Standard SQL.

\n"}, "sqlglot.dialects.bigquery.BigQuery": {"fullname": "sqlglot.dialects.bigquery.BigQuery", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"fullname": "sqlglot.dialects.bigquery.BigQuery.__init__", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Tokenizer", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Parser", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.array_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.except_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"fullname": "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op", "modulename": "sqlglot.dialects.bigquery", "qualname": "BigQuery.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.dialects.clickhouse": {"fullname": "sqlglot.dialects.clickhouse", "modulename": "sqlglot.dialects.clickhouse", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.clickhouse.ClickHouse": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.__init__", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Parser", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"fullname": "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql", "modulename": "sqlglot.dialects.clickhouse", "qualname": "ClickHouse.Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.dialects.databricks": {"fullname": "sqlglot.dialects.databricks", "modulename": "sqlglot.dialects.databricks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.databricks.Databricks": {"fullname": "sqlglot.dialects.databricks.Databricks", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.spark.Spark"}, "sqlglot.dialects.databricks.Databricks.__init__": {"fullname": "sqlglot.dialects.databricks.Databricks.__init__", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.databricks.Databricks.Parser": {"fullname": "sqlglot.dialects.databricks.Databricks.Parser", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.spark.Spark.Parser"}, "sqlglot.dialects.databricks.Databricks.Generator": {"fullname": "sqlglot.dialects.databricks.Databricks.Generator", "modulename": "sqlglot.dialects.databricks", "qualname": "Databricks.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.spark.Spark.Generator"}, "sqlglot.dialects.dialect": {"fullname": "sqlglot.dialects.dialect", "modulename": "sqlglot.dialects.dialect", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialects": {"fullname": "sqlglot.dialects.dialect.Dialects", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects", "kind": "class", "doc": "

An enumeration.

\n", "bases": "builtins.str, enum.Enum"}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"fullname": "sqlglot.dialects.dialect.Dialects.DIALECT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DIALECT", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.DIALECT: ''>"}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"fullname": "sqlglot.dialects.dialect.Dialects.BIGQUERY", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.BIGQUERY", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.BIGQUERY: 'bigquery'>"}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"fullname": "sqlglot.dialects.dialect.Dialects.CLICKHOUSE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.CLICKHOUSE", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.CLICKHOUSE: 'clickhouse'>"}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"fullname": "sqlglot.dialects.dialect.Dialects.DUCKDB", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DUCKDB", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.DUCKDB: 'duckdb'>"}, "sqlglot.dialects.dialect.Dialects.HIVE": {"fullname": "sqlglot.dialects.dialect.Dialects.HIVE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.HIVE", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.HIVE: 'hive'>"}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.MYSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.MYSQL", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.MYSQL: 'mysql'>"}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"fullname": "sqlglot.dialects.dialect.Dialects.ORACLE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.ORACLE", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.ORACLE: 'oracle'>"}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"fullname": "sqlglot.dialects.dialect.Dialects.POSTGRES", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.POSTGRES", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.POSTGRES: 'postgres'>"}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"fullname": "sqlglot.dialects.dialect.Dialects.PRESTO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.PRESTO", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.PRESTO: 'presto'>"}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"fullname": "sqlglot.dialects.dialect.Dialects.REDSHIFT", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.REDSHIFT", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.REDSHIFT: 'redshift'>"}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"fullname": "sqlglot.dialects.dialect.Dialects.SNOWFLAKE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SNOWFLAKE", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.SNOWFLAKE: 'snowflake'>"}, "sqlglot.dialects.dialect.Dialects.SPARK": {"fullname": "sqlglot.dialects.dialect.Dialects.SPARK", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SPARK", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.SPARK: 'spark'>"}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"fullname": "sqlglot.dialects.dialect.Dialects.SQLITE", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.SQLITE", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.SQLITE: 'sqlite'>"}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"fullname": "sqlglot.dialects.dialect.Dialects.STARROCKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.STARROCKS", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.STARROCKS: 'starrocks'>"}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"fullname": "sqlglot.dialects.dialect.Dialects.TABLEAU", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TABLEAU", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.TABLEAU: 'tableau'>"}, "sqlglot.dialects.dialect.Dialects.TRINO": {"fullname": "sqlglot.dialects.dialect.Dialects.TRINO", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TRINO", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.TRINO: 'trino'>"}, "sqlglot.dialects.dialect.Dialects.TSQL": {"fullname": "sqlglot.dialects.dialect.Dialects.TSQL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TSQL", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.TSQL: 'tsql'>"}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"fullname": "sqlglot.dialects.dialect.Dialects.DATABRICKS", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DATABRICKS", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.DATABRICKS: 'databricks'>"}, "sqlglot.dialects.dialect.Dialects.DRILL": {"fullname": "sqlglot.dialects.dialect.Dialects.DRILL", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.DRILL", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.DRILL: 'drill'>"}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"fullname": "sqlglot.dialects.dialect.Dialects.TERADATA", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialects.TERADATA", "kind": "variable", "doc": "

\n", "default_value": " = <Dialects.TERADATA: 'teradata'>"}, "sqlglot.dialects.dialect.Dialect": {"fullname": "sqlglot.dialects.dialect.Dialect", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect", "kind": "class", "doc": "

\n"}, "sqlglot.dialects.dialect.Dialect.__init__": {"fullname": "sqlglot.dialects.dialect.Dialect.__init__", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"fullname": "sqlglot.dialects.dialect.Dialect.get_or_raise", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.get_or_raise", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> Type[sqlglot.dialects.dialect.Dialect]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.format_time": {"fullname": "sqlglot.dialects.dialect.Dialect.format_time", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.format_time", "kind": "function", "doc": "

\n", "signature": "(\tcls,\texpression: Union[str, sqlglot.expressions.Expression, NoneType]) -> Optional[sqlglot.expressions.Expression]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse": {"fullname": "sqlglot.dialects.dialect.Dialect.parse", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parse_into": {"fullname": "sqlglot.dialects.dialect.Dialect.parse_into", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parse_into", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression_type: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\tsql: str,\t**opts) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generate": {"fullname": "sqlglot.dialects.dialect.Dialect.generate", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generate", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Optional[sqlglot.expressions.Expression],\t**opts) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.transpile": {"fullname": "sqlglot.dialects.dialect.Dialect.transpile", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.transpile", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, **opts) -> List[str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.parser": {"fullname": "sqlglot.dialects.dialect.Dialect.parser", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.parser", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.parser.Parser:", "funcdef": "def"}, "sqlglot.dialects.dialect.Dialect.generator": {"fullname": "sqlglot.dialects.dialect.Dialect.generator", "modulename": "sqlglot.dialects.dialect", "qualname": "Dialect.generator", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.generator.Generator:", "funcdef": "def"}, "sqlglot.dialects.dialect.rename_func": {"fullname": "sqlglot.dialects.dialect.rename_func", "modulename": "sqlglot.dialects.dialect", "qualname": "rename_func", "kind": "function", "doc": "

\n", "signature": "(\tname: str) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"fullname": "sqlglot.dialects.dialect.approx_count_distinct_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "approx_count_distinct_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ApproxDistinct) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.if_sql": {"fullname": "sqlglot.dialects.dialect.if_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "if_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtract | sqlglot.expressions.JSONBExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"fullname": "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "arrow_json_extract_scalar_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.JSONExtractScalar | sqlglot.expressions.JSONBExtractScalar) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.inline_array_sql": {"fullname": "sqlglot.dialects.dialect.inline_array_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "inline_array_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Array) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_ilike_sql": {"fullname": "sqlglot.dialects.dialect.no_ilike_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_ilike_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"fullname": "sqlglot.dialects.dialect.no_paren_current_date_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_paren_current_date_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"fullname": "sqlglot.dialects.dialect.no_recursive_cte_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_recursive_cte_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"fullname": "sqlglot.dialects.dialect.no_safe_divide_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_safe_divide_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.SafeDivide) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_tablesample_sql": {"fullname": "sqlglot.dialects.dialect.no_tablesample_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TableSample) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_pivot_sql": {"fullname": "sqlglot.dialects.dialect.no_pivot_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_pivot_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_trycast_sql": {"fullname": "sqlglot.dialects.dialect.no_trycast_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_trycast_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.no_properties_sql": {"fullname": "sqlglot.dialects.dialect.no_properties_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "no_properties_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.str_position_sql": {"fullname": "sqlglot.dialects.dialect.str_position_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "str_position_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.struct_extract_sql": {"fullname": "sqlglot.dialects.dialect.struct_extract_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "struct_extract_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StructExtract) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.var_map_sql": {"fullname": "sqlglot.dialects.dialect.var_map_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "var_map_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Map | sqlglot.expressions.VarMap,\tmap_func_name: str = 'MAP') -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.format_time_lambda": {"fullname": "sqlglot.dialects.dialect.format_time_lambda", "modulename": "sqlglot.dialects.dialect", "qualname": "format_time_lambda", "kind": "function", "doc": "

Helper used for time expressions.

\n\n
Arguments:
\n\n
    \n
  • exp_class: the expression class to instantiate.
  • \n
  • dialect: target sql dialect.
  • \n
  • default: the default format, True being time.
  • \n
\n\n
Returns:
\n\n
\n

A callable that can be used to return the appropriately formatted time expression.

\n
\n", "signature": "(\texp_class: Type[~E],\tdialect: str,\tdefault: Union[bool, str, NoneType] = None) -> Callable[[Sequence], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"fullname": "sqlglot.dialects.dialect.create_with_partitions_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "create_with_partitions_sql", "kind": "function", "doc": "

In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the\nPARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding\ncolumns are removed from the create statement.

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.parse_date_delta": {"fullname": "sqlglot.dialects.dialect.parse_date_delta", "modulename": "sqlglot.dialects.dialect", "qualname": "parse_date_delta", "kind": "function", "doc": "

\n", "signature": "(\texp_class: Type[~E],\tunit_mapping: Optional[Dict[str, str]] = None) -> Callable[[Sequence], ~E]:", "funcdef": "def"}, "sqlglot.dialects.dialect.locate_to_strposition": {"fullname": "sqlglot.dialects.dialect.locate_to_strposition", "modulename": "sqlglot.dialects.dialect", "qualname": "locate_to_strposition", "kind": "function", "doc": "

\n", "signature": "(args: Sequence) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"fullname": "sqlglot.dialects.dialect.strposition_to_locate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "strposition_to_locate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.StrPosition) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.timestrtotime_sql": {"fullname": "sqlglot.dialects.dialect.timestrtotime_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "timestrtotime_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.TimeStrToTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.datestrtodate_sql": {"fullname": "sqlglot.dialects.dialect.datestrtodate_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "datestrtodate_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.DateStrToDate) -> str:", "funcdef": "def"}, "sqlglot.dialects.dialect.trim_sql": {"fullname": "sqlglot.dialects.dialect.trim_sql", "modulename": "sqlglot.dialects.dialect", "qualname": "trim_sql", "kind": "function", "doc": "

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.dialects.drill": {"fullname": "sqlglot.dialects.drill", "modulename": "sqlglot.dialects.drill", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.drill.if_sql": {"fullname": "sqlglot.dialects.drill.if_sql", "modulename": "sqlglot.dialects.drill", "qualname": "if_sql", "kind": "function", "doc": "

Drill requires backticks around certain SQL reserved words, IF being one of them, This function\nadds the backticks around the keyword IF.

\n\n
Arguments:
\n\n
    \n
  • self: The Drill dialect
  • \n
  • expression: The input IF expression
  • \n
\n\n

Returns: The expression with IF in backticks.

\n", "signature": "(\tself: sqlglot.generator.Generator,\texpression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.dialects.drill.Drill": {"fullname": "sqlglot.dialects.drill.Drill", "modulename": "sqlglot.dialects.drill", "qualname": "Drill", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.drill.Drill.__init__": {"fullname": "sqlglot.dialects.drill.Drill.__init__", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.drill.Drill.Tokenizer": {"fullname": "sqlglot.dialects.drill.Drill.Tokenizer", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.drill.Drill.Parser": {"fullname": "sqlglot.dialects.drill.Drill.Parser", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.drill.Drill.Generator": {"fullname": "sqlglot.dialects.drill.Drill.Generator", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"fullname": "sqlglot.dialects.drill.Drill.Generator.normalize_func", "modulename": "sqlglot.dialects.drill", "qualname": "Drill.Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.dialects.duckdb": {"fullname": "sqlglot.dialects.duckdb", "modulename": "sqlglot.dialects.duckdb", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.duckdb.DuckDB": {"fullname": "sqlglot.dialects.duckdb.DuckDB", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"fullname": "sqlglot.dialects.duckdb.DuckDB.__init__", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Tokenizer", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Parser", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"fullname": "sqlglot.dialects.duckdb.DuckDB.Generator", "modulename": "sqlglot.dialects.duckdb", "qualname": "DuckDB.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.hive": {"fullname": "sqlglot.dialects.hive", "modulename": "sqlglot.dialects.hive", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.hive.Hive": {"fullname": "sqlglot.dialects.hive.Hive", "modulename": "sqlglot.dialects.hive", "qualname": "Hive", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.hive.Hive.__init__": {"fullname": "sqlglot.dialects.hive.Hive.__init__", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.hive.Hive.Tokenizer": {"fullname": "sqlglot.dialects.hive.Hive.Tokenizer", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.hive.Hive.Parser": {"fullname": "sqlglot.dialects.hive.Hive.Parser", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.hive.Hive.Generator": {"fullname": "sqlglot.dialects.hive.Hive.Generator", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"fullname": "sqlglot.dialects.hive.Hive.Generator.with_properties", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties):", "funcdef": "def"}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"fullname": "sqlglot.dialects.hive.Hive.Generator.datatype_sql", "modulename": "sqlglot.dialects.hive", "qualname": "Hive.Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.mysql": {"fullname": "sqlglot.dialects.mysql", "modulename": "sqlglot.dialects.mysql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.mysql.MySQL": {"fullname": "sqlglot.dialects.mysql.MySQL", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.mysql.MySQL.__init__": {"fullname": "sqlglot.dialects.mysql.MySQL.__init__", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"fullname": "sqlglot.dialects.mysql.MySQL.Tokenizer", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.mysql.MySQL.Parser": {"fullname": "sqlglot.dialects.mysql.MySQL.Parser", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.mysql.MySQL.Generator": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator.show_sql", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator.show_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator.setitem_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"fullname": "sqlglot.dialects.mysql.MySQL.Generator.set_sql", "modulename": "sqlglot.dialects.mysql", "qualname": "MySQL.Generator.set_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.oracle": {"fullname": "sqlglot.dialects.oracle", "modulename": "sqlglot.dialects.oracle", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.oracle.Oracle": {"fullname": "sqlglot.dialects.oracle.Oracle", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.oracle.Oracle.__init__": {"fullname": "sqlglot.dialects.oracle.Oracle.__init__", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.oracle.Oracle.Parser": {"fullname": "sqlglot.dialects.oracle.Oracle.Parser", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.oracle.Oracle.Generator": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.query_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression, *sqls):", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.offset_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"fullname": "sqlglot.dialects.oracle.Oracle.Generator.table_sql", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Generator.table_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"fullname": "sqlglot.dialects.oracle.Oracle.Tokenizer", "modulename": "sqlglot.dialects.oracle", "qualname": "Oracle.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres": {"fullname": "sqlglot.dialects.postgres", "modulename": "sqlglot.dialects.postgres", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.postgres.Postgres": {"fullname": "sqlglot.dialects.postgres.Postgres", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.postgres.Postgres.__init__": {"fullname": "sqlglot.dialects.postgres.Postgres.__init__", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"fullname": "sqlglot.dialects.postgres.Postgres.Tokenizer", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.postgres.Postgres.Parser": {"fullname": "sqlglot.dialects.postgres.Postgres.Parser", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.postgres.Postgres.Generator": {"fullname": "sqlglot.dialects.postgres.Postgres.Generator", "modulename": "sqlglot.dialects.postgres", "qualname": "Postgres.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto": {"fullname": "sqlglot.dialects.presto", "modulename": "sqlglot.dialects.presto", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.presto.Presto": {"fullname": "sqlglot.dialects.presto.Presto", "modulename": "sqlglot.dialects.presto", "qualname": "Presto", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.presto.Presto.__init__": {"fullname": "sqlglot.dialects.presto.Presto.__init__", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.presto.Presto.Tokenizer": {"fullname": "sqlglot.dialects.presto.Presto.Tokenizer", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.presto.Presto.Parser": {"fullname": "sqlglot.dialects.presto.Presto.Parser", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.presto.Presto.Generator": {"fullname": "sqlglot.dialects.presto.Presto.Generator", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"fullname": "sqlglot.dialects.presto.Presto.Generator.transaction_sql", "modulename": "sqlglot.dialects.presto", "qualname": "Presto.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.redshift": {"fullname": "sqlglot.dialects.redshift", "modulename": "sqlglot.dialects.redshift", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.redshift.Redshift": {"fullname": "sqlglot.dialects.redshift.Redshift", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres"}, "sqlglot.dialects.redshift.Redshift.__init__": {"fullname": "sqlglot.dialects.redshift.Redshift.__init__", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.redshift.Redshift.Parser": {"fullname": "sqlglot.dialects.redshift.Redshift.Parser", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Parser"}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"fullname": "sqlglot.dialects.redshift.Redshift.Tokenizer", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.postgres.Postgres.Tokenizer"}, "sqlglot.dialects.redshift.Redshift.Generator": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.postgres.Postgres.Generator"}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.values_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.values_sql", "kind": "function", "doc": "

Converts VALUES... expression into a series of unions.

\n\n

Note: If you have a lot of unions then this will result in a large number of recursive statements to\nevaluate the expression. You may need to increase sys.setrecursionlimit to run and it can also be\nvery slow.

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.with_properties", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.with_properties", "kind": "function", "doc": "

Redshift doesn't have WITH as part of their with_properties so we remove it

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.renametable_sql", "kind": "function", "doc": "

Redshift only supports defining the table name itself (not the db) when renaming tables

\n", "signature": "(self, expression: sqlglot.expressions.RenameTable) -> str:", "funcdef": "def"}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"fullname": "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql", "modulename": "sqlglot.dialects.redshift", "qualname": "Redshift.Generator.datatype_sql", "kind": "function", "doc": "

Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean\nVARCHAR of max length which is VARCHAR(max) in Redshift. Therefore if we get a TEXT data type\nwithout precision we convert it to VARCHAR(max) and if it does have precision then we just convert\nTEXT to VARCHAR.

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake": {"fullname": "sqlglot.dialects.snowflake", "modulename": "sqlglot.dialects.snowflake", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.snowflake.Snowflake": {"fullname": "sqlglot.dialects.snowflake.Snowflake", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"fullname": "sqlglot.dialects.snowflake.Snowflake.__init__", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Parser", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Tokenizer", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.except_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.values_sql", "kind": "function", "doc": "

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.

\n\n

We also want to make sure that after we find matches where we need to unquote a column that we prevent users\nfrom adding quotes to the column by using the identify argument when generating the SQL.

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.select_sql", "kind": "function", "doc": "

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also\nthat all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need\nto unquote a column that we prevent users from adding quotes to the column by using the identify argument when\ngenerating the SQL.

\n\n

Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the\nexpression. This might not be true in a case where the same column name can be sourced from another table that can\nproperly quote but should be true in most cases.

\n", "signature": "(self, expression: sqlglot.expressions.Select) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.dialects.snowflake", "qualname": "Snowflake.Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark": {"fullname": "sqlglot.dialects.spark", "modulename": "sqlglot.dialects.spark", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.spark.Spark": {"fullname": "sqlglot.dialects.spark.Spark", "modulename": "sqlglot.dialects.spark", "qualname": "Spark", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive"}, "sqlglot.dialects.spark.Spark.__init__": {"fullname": "sqlglot.dialects.spark.Spark.__init__", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.spark.Spark.Parser": {"fullname": "sqlglot.dialects.spark.Spark.Parser", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Parser"}, "sqlglot.dialects.spark.Spark.Generator": {"fullname": "sqlglot.dialects.spark.Spark.Generator", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.hive.Hive.Generator"}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"fullname": "sqlglot.dialects.spark.Spark.Generator.cast_sql", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.dialects.spark.Spark.Tokenizer": {"fullname": "sqlglot.dialects.spark.Spark.Tokenizer", "modulename": "sqlglot.dialects.spark", "qualname": "Spark.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.hive.Hive.Tokenizer"}, "sqlglot.dialects.sqlite": {"fullname": "sqlglot.dialects.sqlite", "modulename": "sqlglot.dialects.sqlite", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.sqlite.SQLite": {"fullname": "sqlglot.dialects.sqlite.SQLite", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.sqlite.SQLite.__init__": {"fullname": "sqlglot.dialects.sqlite.SQLite.__init__", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"fullname": "sqlglot.dialects.sqlite.SQLite.Tokenizer", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.sqlite.SQLite.Parser": {"fullname": "sqlglot.dialects.sqlite.SQLite.Parser", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.sqlite.SQLite.Generator": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"fullname": "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql", "modulename": "sqlglot.dialects.sqlite", "qualname": "SQLite.Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.dialects.starrocks": {"fullname": "sqlglot.dialects.starrocks", "modulename": "sqlglot.dialects.starrocks", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.starrocks.StarRocks": {"fullname": "sqlglot.dialects.starrocks.StarRocks", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.mysql.MySQL"}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"fullname": "sqlglot.dialects.starrocks.StarRocks.__init__", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"fullname": "sqlglot.dialects.starrocks.StarRocks.Generator", "modulename": "sqlglot.dialects.starrocks", "qualname": "StarRocks.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.mysql.MySQL.Generator"}, "sqlglot.dialects.tableau": {"fullname": "sqlglot.dialects.tableau", "modulename": "sqlglot.dialects.tableau", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tableau.Tableau": {"fullname": "sqlglot.dialects.tableau.Tableau", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tableau.Tableau.__init__": {"fullname": "sqlglot.dialects.tableau.Tableau.__init__", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.tableau.Tableau.Generator": {"fullname": "sqlglot.dialects.tableau.Tableau.Generator", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tableau.Tableau.Parser": {"fullname": "sqlglot.dialects.tableau.Tableau.Parser", "modulename": "sqlglot.dialects.tableau", "qualname": "Tableau.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata": {"fullname": "sqlglot.dialects.teradata", "modulename": "sqlglot.dialects.teradata", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.teradata.Teradata": {"fullname": "sqlglot.dialects.teradata.Teradata", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.teradata.Teradata.__init__": {"fullname": "sqlglot.dialects.teradata.Teradata.__init__", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.teradata.Teradata.Parser": {"fullname": "sqlglot.dialects.teradata.Teradata.Parser", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.teradata.Teradata.Generator": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.partitionedbyproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PartitionedByProperty) -> str:", "funcdef": "def"}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"fullname": "sqlglot.dialects.teradata.Teradata.Generator.update_sql", "modulename": "sqlglot.dialects.teradata", "qualname": "Teradata.Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.dialects.trino": {"fullname": "sqlglot.dialects.trino", "modulename": "sqlglot.dialects.trino", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.trino.Trino": {"fullname": "sqlglot.dialects.trino.Trino", "modulename": "sqlglot.dialects.trino", "qualname": "Trino", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto"}, "sqlglot.dialects.trino.Trino.__init__": {"fullname": "sqlglot.dialects.trino.Trino.__init__", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.trino.Trino.Generator": {"fullname": "sqlglot.dialects.trino.Trino.Generator", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.dialects.presto.Presto.Generator"}, "sqlglot.dialects.trino.Trino.Tokenizer": {"fullname": "sqlglot.dialects.trino.Trino.Tokenizer", "modulename": "sqlglot.dialects.trino", "qualname": "Trino.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.presto.Presto.Tokenizer"}, "sqlglot.dialects.tsql": {"fullname": "sqlglot.dialects.tsql", "modulename": "sqlglot.dialects.tsql", "kind": "module", "doc": "

\n"}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"fullname": "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "generate_date_delta_with_unit_sql", "kind": "function", "doc": "

\n", "signature": "(self, e):", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL": {"fullname": "sqlglot.dialects.tsql.TSQL", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.dialects.tsql.TSQL.__init__": {"fullname": "sqlglot.dialects.tsql.TSQL.__init__", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"fullname": "sqlglot.dialects.tsql.TSQL.Tokenizer", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.dialects.tsql.TSQL.Parser": {"fullname": "sqlglot.dialects.tsql.TSQL.Parser", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n", "bases": "sqlglot.parser.Parser"}, "sqlglot.dialects.tsql.TSQL.Generator": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.systemtime_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SystemTime) -> str:", "funcdef": "def"}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"fullname": "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql", "modulename": "sqlglot.dialects.tsql", "qualname": "TSQL.Generator.returnsproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ReturnsProperty) -> str:", "funcdef": "def"}, "sqlglot.diff": {"fullname": "sqlglot.diff", "modulename": "sqlglot.diff", "kind": "module", "doc": "

Semantic Diff for SQL

\n\n

by Iaroslav Zeigerman

\n\n

Motivation

\n\n

Software is constantly changing and evolving, and identifying what has changed and reviewing those changes is an integral part of the development process. SQL code is no exception to this.

\n\n

Text-based diff tools such as git diff, when applied to a code base, have certain limitations. First, they can only detect insertions and deletions, not movements or updates of individual pieces of code. Second, such tools can only detect changes between lines of text, which is too coarse for something as granular and detailed as source code. Additionally, the outcome of such a diff is dependent on the underlying code formatting, and yields different results if the formatting should change.

\n\n

Consider the following diff generated by Git:

\n\n

\"Git

\n\n

Semantically the query hasn\u2019t changed. The two arguments b and c have been swapped (moved), posing no impact on the output of the query. Yet Git replaced the whole affected expression alongside a bulk of unrelated elements.

\n\n

The alternative to text-based diffing is to compare Abstract Syntax Trees (AST) instead. The main advantage of ASTs is that they are a direct product of code parsing, which represents the underlying code structure at any desired level of granularity. Comparing ASTs may yield extremely precise diffs; changes such as code movements and updates can also be detected. Even more importantly, this approach facilitates additional use cases beyond eyeballing two versions of source code side by side.

\n\n

The use cases I had in mind for SQL when I decided to embark on this journey of semantic diffing were the following:

\n\n
    \n
  • Query similarity score. Identifying which parts the two queries have in common to automatically suggest opportunities for consolidation, creation of intermediate/staging tables, and so on.
  • \n
  • Differentiating between cosmetic / structural changes and functional ones. For example when a nested query is refactored into a common table expression (CTE), this kind of change doesn\u2019t have any functional impact on either a query or its outcome.
  • \n
  • Automatic suggestions about the need to retroactively backfill data. This is especially important for pipelines that populate very large tables for which restatement is a runtime-intensive procedure. The ability to discern between simple code movements and actual modifications can help assess the impact of a change and make suggestions accordingly.
  • \n
\n\n

The implementation discussed in this post is now a part of the SQLGlot library. You can find a complete source code in the diff.py module. The choice of SQLglot was an obvious one due to its simple but powerful API, lack of external dependencies and, more importantly, extensive list of supported SQL dialects.

\n\n

The Search for a Solution

\n\n

When it comes to any diffing tool (not just a semantic one), the primary challenge is to match as many elements of compared entities as possible. Once such a set of matching elements is available, deriving a sequence of changes becomes an easy task.

\n\n

If our elements have unique identifiers associated with them (for example, an element\u2019s ID in DOM), the matching problem is trivial. However, the SQL syntax trees that we are comparing have neither unique keys nor object identifiers that can be used for the purposes of matching. So, how are we supposed to find pairs of nodes that are related?

\n\n

To better illustrate the problem, consider comparing the following SQL expressions: SELECT a + b + c, d, e and SELECT a - b + c, e, f. Matching individual nodes from respective syntax trees can be visualized as follows:

\n\n

\"Figure\nFigure 1: Example of node matching for two SQL expression trees.

\n\n

By looking at the figure of node matching for two SQL expression trees above, we conclude that the following changes should be captured by our solution:

\n\n
    \n
  • Inserted nodes: Sub and f. These are the nodes from the target AST which do not have a matching node in the source AST.
  • \n
  • Removed nodes: Add and d. These are the nodes from the source AST which do not have a counterpart in the target AST.
  • \n
  • Remaining nodes must be identified as unchanged.
  • \n
\n\n

It should be clear at this point that if we manage to match nodes in the source tree with their counterparts in the target tree, then computing the diff becomes a trivial matter.

\n\n

Na\u00efve Brute-Force

\n\n

The na\u00efve solution would be to try all different permutations of node pair combinations, and see which set of pairs performs the best based on some type of heuristics. The runtime cost of such a solution quickly reaches the escape velocity; if both trees had only 10 nodes each, the number of such sets would approximately be 10! ^ 2 = 3.6M ^ 2 ~= 13 * 10^12. This is a very bad case of factorial complexity (to be precise, it\u2019s actually much worse - O(n! ^ 2) - but I couldn\u2019t come up with a name for it), so there is little need to explore this approach any further.

\n\n

Myers Algorithm

\n\n

After the na\u00efve approach was proven to be infeasible, the next question I asked myself was \u201chow does git diff work?\u201d. This question led me to discover the Myers diff algorithm [1]. This algorithm has been designed to compare sequences of strings. At its core, it\u2019s looking for the shortest path on a graph of possible edits that transform the first sequence into the second one, while heavily rewarding those paths that lead to longest subsequences of unchanged elements. There\u2019s a lot of material out there describing this algorithm in greater detail. I found James Coglan\u2019s series of blog posts to be the most comprehensive.

\n\n

Therefore, I had this \u201cbrilliant\u201d (actually not) idea to transform trees into sequences by traversing them in topological order, and then applying the Myers algorithm on resulting sequences while using a custom heuristics when checking the equality of two nodes. Unsurprisingly, comparing sequences of strings is quite different from comparing hierarchical tree structures, and by flattening trees into sequences, we lose a lot of relevant context. This resulted in a terrible performance of this algorithm on ASTs. It often matched completely unrelated nodes, even when the two trees were mostly the same, and produced extremely inaccurate lists of changes overall. After playing around with it a little and tweaking my equality heuristics to improve accuracy, I ultimately scrapped the whole implementation and went back to the drawing board.

\n\n

Change Distiller

\n\n

The algorithm I settled on at the end was Change Distiller, created by Fluri et al. [2], which in turn is an improvement over the core idea described by Chawathe et al. [3].

\n\n

The algorithm consists of two high-level steps:

\n\n
    \n
  1. Finding appropriate matchings between pairs of nodes that are part of compared ASTs. Identifying what is meant by \u201cappropriate\u201d matching is also a part of this step.
  2. \n
  3. Generating the so-called \u201cedit script\u201d from the matching set built in the 1st step. The edit script is a sequence of edit operations (for example, insert, remove, update, etc.) on individual tree nodes, such that when applied as transformations on the source AST, it eventually becomes the target AST. In general, the shorter the sequence, the better. The length of the edit script can be used to compare the performance of different algorithms, though this is not the only metric that matters.
  4. \n
\n\n

The rest of this section is dedicated to the Python implementation of the steps above using the AST implementation provided by the SQLGlot library.

\n\n

Building the Matching Set

\n\n

Matching Leaves

\n\n

We begin composing the matching set by matching the leaf nodes. Leaf nodes are the nodes that do not have any children nodes (such as literals, identifiers, etc.). In order to match them, we gather all the leaf nodes from the source tree and generate a cartesian product with all the leaves from the target tree, while comparing pairs created this way and assigning them a similarity score. During this stage, we also exclude pairs that don\u2019t pass basic matching criteria. Then, we pick pairs that scored the highest while making sure that each node is matched no more than once.

\n\n

Using the example provided at the beginning of the post, the process of building an initial set of candidate matchings can be seen on Figure 2.

\n\n

\"Figure\nFigure 2: Building a set of candidate matchings between leaf nodes. The third item in each triplet represents a similarity score between two nodes.

\n\n

First, let\u2019s analyze the similarity score. Then, we\u2019ll discuss matching criteria.

\n\n

The similarity score proposed by Fluri et al. [2] is a dice coefficient applied to bigrams of respective node values. A bigram is a sequence of two adjacent elements from a string computed in a sliding window fashion:

\n\n
\n
def bigram(string):\n    count = max(0, len(string) - 1)\n    return [string[i : i + 2] for i in range(count)]\n
\n
\n\n

For reasons that will become clear shortly, we actually need to compute bigram histograms rather than just sequences:

\n\n
\n
from collections import defaultdict\n\ndef bigram_histo(string):\n    count = max(0, len(string) - 1)\n    bigram_histo = defaultdict(int)\n    for i in range(count):\n        bigram_histo[string[i : i + 2]] += 1\n    return bigram_histo\n
\n
\n\n

The dice coefficient formula looks like the following:

\n\n

\"Dice

\n\n

Where X is a bigram of the source node and Y is a bigram of the second one. What this essentially does is count the number of bigram elements the two nodes have in common, multiply it by 2, and then divide by the total number of elements in both bigrams. This is where bigram histograms come in handy:

\n\n
\n
def dice_coefficient(source, target):\n    source_histo = bigram_histo(source.sql())\n    target_histo = bigram_histo(target.sql())\n\n    total_grams = (\n        sum(source_histo.values()) + sum(target_histo.values())\n    )\n    if not total_grams:\n        return 1.0 if source == target else 0.0\n\n    overlap_len = 0\n    overlapping_grams = set(source_histo) & set(target_histo)\n    for g in overlapping_grams:\n        overlap_len += min(source_histo[g], target_histo[g])\n\n    return 2 * overlap_len / total_grams\n
\n
\n\n

To compute a bigram given a tree node, we first transform the node into its canonical SQL representation, so that the Literal(123) node becomes just \u201c123\u201d and the Identifier(\u201ca\u201d) node becomes just \u201ca\u201d. We also handle a scenario when strings are too short to derive bigrams. In this case, we fall back to checking the two nodes for equality.

\n\n

Now when we know how to compute the similarity score, we can take care of the matching criteria for leaf nodes. In the original paper [2], the matching criteria is formalized as follows:

\n\n

\"Matching

\n\n

The two nodes are matched if two conditions are met:

\n\n
    \n
  1. The node labels match (in our case labels are just node types).
  2. \n
  3. The similarity score for node values is greater than or equal to some threshold \u201cf\u201d. The authors of the paper recommend setting the value of \u201cf\u201d to 0.6.
  4. \n
\n\n

With building blocks in place, we can now build a matching set for leaf nodes. First, we generate a list of candidates for matching:

\n\n
\n
from heapq import heappush, heappop\n\ncandidate_matchings = []\nsource_leaves = _get_leaves(self._source)\ntarget_leaves = _get_leaves(self._target)\nfor source_leaf in source_leaves:\n    for target_leaf in target_leaves:\n        if _is_same_type(source_leaf, target_leaf):\n            similarity_score = dice_coefficient(\n                source_leaf, target_leaf\n            )\n            if similarity_score >= 0.6:\n                heappush(\n                    candidate_matchings,\n                    (\n                        -similarity_score,\n                        len(candidate_matchings),\n                        source_leaf,\n                        target_leaf,\n                    ),\n                )\n
\n
\n\n

In the implementation above, we push each matching pair onto the heap to automatically maintain the correct order based on the assigned similarity score.

\n\n

Finally, we build the initial matching set by picking leaf pairs with the highest score:

\n\n
\n
matching_set = set()\nwhile candidate_matchings:\n    _, _, source_leaf, target_leaf = heappop(candidate_matchings)\n    if (\n        source_leaf in unmatched_source_nodes\n        and target_leaf in unmatched_target_nodes\n    ):\n        matching_set.add((source_leaf, target_leaf))\n        unmatched_source_nodes.remove(source_leaf)\n        unmatched_target_nodes.remove(target_leaf)\n
\n
\n\n

To finalize the matching set, we should now proceed with matching inner nodes.

\n\n

Matching Inner Nodes

\n\n

Matching inner nodes is quite similar to matching leaf nodes, with the following two distinctions:

\n\n
    \n
  • Rather than ranking a set of possible candidates, we pick the first node pair that passes the matching criteria.
  • \n
  • The matching criteria itself has been extended to account for the number of leaf nodes the pair of inner nodes have in common.
  • \n
\n\n

\"Figure\nFigure 3: Matching inner nodes based on their type as well as how many of their leaf nodes have been previously matched.

\n\n

Let\u2019s start with the matching criteria. The criteria is formalized as follows:

\n\n

\"Matching

\n\n

Alongside already familiar similarity score and node type criteria, there is a new one in the middle: the ratio of leaf nodes that the two nodes have in common must exceed some threshold \u201ct\u201d. The recommended value for \u201ct\u201d is also 0.6. Counting the number of common leaf nodes is pretty straightforward, since we already have the complete matching set for leaves. All we need to do is count how many matching pairs the leaf nodes from the two compared inner nodes form.

\n\n

There are two additional heuristics associated with this matching criteria:

\n\n
    \n
  • Inner node similarity weighting: if the similarity score between the node values doesn\u2019t pass the threshold \u201cf\u201d but the ratio of common leaf nodes (\u201ct\u201d) is greater than or equal to 0.8, then the matching is considered successful.
  • \n
  • The threshold \u201ct\u201d is reduced to 0.4 for inner nodes with the number of leaf nodes equal to 4 or less, in order to decrease the false negative rate for small subtrees.
  • \n
\n\n

We now only have to iterate through the remaining unmatched nodes and form matching pairs based on the outlined criteria:

\n\n
\n
leaves_matching_set = matching_set.copy()\n\nfor source_node in unmatched_source_nodes.copy():\n    for target_node in unmatched_target_nodes:\n        if _is_same_type(source_node, target_node):\n            source_leaves = set(_get_leaves(source_node))\n            target_leaves = set(_get_leaves(target_node))\n\n            max_leaves_num = max(len(source_leaves), len(target_leaves))\n            if max_leaves_num:\n                common_leaves_num = sum(\n                    1 if s in source_leaves and t in target_leaves else 0\n                    for s, t in leaves_matching_set\n                )\n                leaf_similarity_score = common_leaves_num / max_leaves_num\n            else:\n                leaf_similarity_score = 0.0\n\n            adjusted_t = (\n                0.6\n                if min(len(source_leaves), len(target_leaves)) > 4\n                else 0.4\n            )\n\n            if leaf_similarity_score >= 0.8 or (\n                leaf_similarity_score >= adjusted_t\n                and dice_coefficient(source_node, target_node) >= 0.6\n            ):\n                matching_set.add((source_node, target_node))\n                unmatched_source_nodes.remove(source_node)\n                unmatched_target_nodes.remove(target_node)\n                break\n
\n
\n\n

After the matching set is formed, we can proceed with generation of the edit script, which will be the algorithm\u2019s output.

\n\n

Generating the Edit Script

\n\n

At this point, we should have the following 3 sets at our disposal:

\n\n
    \n
  • The set of matched node pairs.
  • \n
  • The set of remaining unmatched nodes from the source tree.
  • \n
  • The set of remaining unmatched nodes from the target tree.
  • \n
\n\n

We can derive 3 kinds of edits from the matching set: either the node\u2019s value was updated (Update), the node was moved to a different position within the tree (Move), or the node remained unchanged (Keep). Note that the Move case is not mutually exclusive with the other two. The node could have been updated or could have remained the same while at the same time its position within its parent node or the parent node itself could have changed. All unmatched nodes from the source tree are the ones that were removed (Remove), while unmatched nodes from the target tree are the ones that were inserted (Insert).

\n\n

The latter two cases are pretty straightforward to implement:

\n\n
\n
edit_script = []\n\nfor removed_node in unmatched_source_nodes:\n    edit_script.append(Remove(removed_node))\nfor inserted_node in unmatched_target_nodes:\n    edit_script.append(Insert(inserted_node))\n
\n
\n\n

Traversing the matching set requires a little more thought:

\n\n
\n
for source_node, target_node in matching_set:\n    if (\n        not isinstance(source_node, LEAF_EXPRESSION_TYPES)\n        or source_node == target_node\n    ):\n        move_edits = generate_move_edits(\n            source_node, target_node, matching_set\n        )\n        edit_script.extend(move_edits)\n        edit_script.append(Keep(source_node, target_node))\n    else:\n        edit_script.append(Update(source_node, target_node))\n
\n
\n\n

If a matching pair represents a pair of leaf nodes, we check if they are the same to decide whether an update took place. For inner node pairs, we also need to compare the positions of their respective children to detect node movements. Chawathe et al. [3] suggest applying the longest common subsequence (LCS) algorithm which, no surprise here, was described by Myers himself [1]. There is a small catch, however: instead of checking the equality of two children nodes, we need to check whether the two nodes form a pair that is a part of our matching set.

\n\n

Now with this knowledge, the implementation becomes straightforward:

\n\n
\n
def generate_move_edits(source, target, matching_set):\n    source_children = _get_child_nodes(source)\n    target_children = _get_child_nodes(target)\n\n    lcs = set(\n        _longest_common_subsequence(\n            source_children,\n            target_children,\n            lambda l, r: (l, r) in matching_set\n        )\n    )\n\n    move_edits = []\n    for node in source_children:\n        if node not in lcs and node not in unmatched_source_nodes:\n            move_edits.append(Move(node))\n\n    return move_edits\n
\n
\n\n

I left out the implementation of the LCS algorithm itself here, but there are plenty of implementation choices out there that can be easily looked up.

\n\n

Output

\n\n

The implemented algorithm produces the output that resembles the following:

\n\n
\n
>>> from sqlglot import parse_one, diff\n>>> diff(parse_one("SELECT a + b + c, d, e"), parse_one("SELECT a - b + c, e, f"))\n\nRemove(Add)\nRemove(Column(d))\nRemove(Identifier(d))\nInsert(Sub)\nInsert(Column(f))\nInsert(Identifier(f))\nKeep(Select, Select)\nKeep(Add, Add)\nKeep(Column(a), Column(a))\nKeep(Identifier(a), Identifier(a))\nKeep(Column(b), Column(b))\nKeep(Identifier(b), Identifier(b))\nKeep(Column(c), Column(c))\nKeep(Identifier(c), Identifier(c))\nKeep(Column(e), Column(e))\nKeep(Identifier(e), Identifier(e))\n
\n
\n\n

Note that the output above is abbreviated. The string representation of actual AST nodes is significantly more verbose.

\n\n

The implementation works especially well when coupled with the SQLGlot\u2019s query optimizer which can be used to produce canonical representations of compared queries:

\n\n
\n
>>> schema={"t": {"a": "INT", "b": "INT", "c": "INT", "d": "INT"}}\n>>> source = """\n... SELECT 1 + 1 + a\n... FROM t\n... WHERE b = 1 OR (c = 2 AND d = 3)\n... """\n>>> target = """\n... SELECT 2 + a\n... FROM t\n... WHERE (b = 1 OR c = 2) AND (b = 1 OR d = 3)\n... """\n>>> optimized_source = optimize(parse_one(source), schema=schema)\n>>> optimized_target = optimize(parse_one(target), schema=schema)\n>>> edit_script = diff(optimized_source, optimized_target)\n>>> sum(0 if isinstance(e, Keep) else 1 for e in edit_script)\n0\n
\n
\n\n

Optimizations

\n\n

The worst case runtime complexity of this algorithm is not exactly stellar: O(n^2 * log n^2). This is because of the leaf matching process, which involves ranking a cartesian product between all leaf nodes of compared trees. Unsurprisingly, the algorithm takes a considerable time to finish for bigger queries.

\n\n

There are still a few basic things we can do in our implementation to help improve performance:

\n\n
    \n
  • Refer to individual node objects using their identifiers (Python\u2019s id()) instead of direct references in sets. This helps avoid costly recursive hash calculations and equality checks.
  • \n
  • Cache bigram histograms to avoid computing them more than once for the same node.
  • \n
  • Compute the canonical SQL string representation for each tree once while caching string representations of all inner nodes. This prevents redundant tree traversals when bigrams are computed.
  • \n
\n\n

At the time of writing only the first two optimizations have been implemented, so there is an opportunity to contribute for anyone who\u2019s interested.

\n\n

Alternative Solutions

\n\n

This section is dedicated to solutions that I\u2019ve investigated, but haven\u2019t tried.

\n\n

First, this section wouldn\u2019t be complete without Tristan Hume\u2019s blog post. Tristan\u2019s solution has a lot in common with the Myers algorithm plus heuristics that is much more clever than what I came up with. The implementation relies on a combination of dynamic programming and A* search algorithm to explore the space of possible matchings and pick the best ones. It seemed to have worked well for Tristan\u2019s specific use case, but after my negative experience with the Myers algorithm, I decided to try something different.

\n\n

Another notable approach is the Gumtree algorithm by Falleri et al. [4]. I discovered this paper after I\u2019d already implemented the algorithm that is the main focus of this post. In sections 5.2 and 5.3 of their paper, the authors compare the two algorithms side by side and claim that Gumtree is significantly better in terms of both runtime performance and accuracy when evaluated on 12 792 pairs of Java source files. This doesn\u2019t surprise me, as the algorithm takes the height of subtrees into account. In my tests, I definitely saw scenarios in which this context would have helped. On top of that, the authors promise O(n^2) runtime complexity in the worst case which, given the Change Distiller's O(n^2 * log n^2), looks particularly tempting. I hope to try this algorithm out at some point, and there is a good chance you see me writing about it in my future posts.

\n\n

Conclusion

\n\n

The Change Distiller algorithm yielded quite satisfactory results in most of my tests. The scenarios in which it fell short mostly concerned identical (or very similar) subtrees located in different parts of the AST. In those cases, node mismatches were frequent and, as a result, edit scripts were somewhat suboptimal.

\n\n

Additionally, the runtime performance of the algorithm leaves a lot to be desired. On trees with 1000 leaf nodes each, the algorithm takes a little under 2 seconds to complete. My implementation still has room for improvement, but this should give you a rough idea of what to expect. It appears that the Gumtree algorithm [4] can help address both of these points. I hope to find bandwidth to work on it soon and then compare the two algorithms side-by-side to find out which one performs better on SQL specifically. In the meantime, Change Distiller definitely gets the job done, and I can now proceed with applying it to some of the use cases I mentioned at the beginning of this post.

\n\n

I\u2019m also curious to learn whether other folks in the industry faced a similar problem, and how they approached it. If you did something similar, I\u2019m interested to hear about your experience.

\n\n

References

\n\n

[1] Eugene W. Myers. An O(ND) Difference Algorithm and Its Variations. Algorithmica 1(2): 251-266 (1986)

\n\n

[2] B. Fluri, M. Wursch, M. Pinzger, and H. Gall. Change Distilling: Tree differencing for fine-grained source code change extraction. IEEE Trans. Software Eng., 33(11):725\u2013743, 2007.

\n\n

[3] S.S. Chawathe, A. Rajaraman, H. Garcia-Molina, and J. Widom. Change Detection in Hierarchically Structured Information. Proc. ACM Sigmod Int\u2019l Conf. Management of Data, pp. 493-504, June 1996

\n\n

[4] Jean-R\u00e9my Falleri, Flor\u00e9al Morandat, Xavier Blanc, Matias Martinez, Martin Monperrus. Fine-grained and Accurate Source Code Differencing. Proceedings of the International Conference on Automated Software Engineering, 2014, V\u00e4steras, Sweden. pp.313-324, 10.1145/2642937.2642982. hal-01054552

\n\n
\n"}, "sqlglot.diff.Insert": {"fullname": "sqlglot.diff.Insert", "modulename": "sqlglot.diff", "qualname": "Insert", "kind": "class", "doc": "

Indicates that a new node has been inserted

\n"}, "sqlglot.diff.Insert.__init__": {"fullname": "sqlglot.diff.Insert.__init__", "modulename": "sqlglot.diff", "qualname": "Insert.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Remove": {"fullname": "sqlglot.diff.Remove", "modulename": "sqlglot.diff", "qualname": "Remove", "kind": "class", "doc": "

Indicates that an existing node has been removed

\n"}, "sqlglot.diff.Remove.__init__": {"fullname": "sqlglot.diff.Remove.__init__", "modulename": "sqlglot.diff", "qualname": "Remove.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Move": {"fullname": "sqlglot.diff.Move", "modulename": "sqlglot.diff", "qualname": "Move", "kind": "class", "doc": "

Indicates that an existing node's position within the tree has changed

\n"}, "sqlglot.diff.Move.__init__": {"fullname": "sqlglot.diff.Move.__init__", "modulename": "sqlglot.diff", "qualname": "Move.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.diff.Update": {"fullname": "sqlglot.diff.Update", "modulename": "sqlglot.diff", "qualname": "Update", "kind": "class", "doc": "

Indicates that an existing node has been updated

\n"}, "sqlglot.diff.Update.__init__": {"fullname": "sqlglot.diff.Update.__init__", "modulename": "sqlglot.diff", "qualname": "Update.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.Keep": {"fullname": "sqlglot.diff.Keep", "modulename": "sqlglot.diff", "qualname": "Keep", "kind": "class", "doc": "

Indicates that an existing node hasn't been changed

\n"}, "sqlglot.diff.Keep.__init__": {"fullname": "sqlglot.diff.Keep.__init__", "modulename": "sqlglot.diff", "qualname": "Keep.__init__", "kind": "function", "doc": "

\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression)"}, "sqlglot.diff.diff": {"fullname": "sqlglot.diff.diff", "modulename": "sqlglot.diff", "qualname": "diff", "kind": "function", "doc": "

Returns the list of changes between the source and the target expressions.

\n\n
Examples:
\n\n
\n
\n
>>> diff(parse_one("a + b"), parse_one("a + c"))\n[\n    Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),\n    Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),\n    Keep(\n        source=(ADD this: ...),\n        target=(ADD this: ...)\n    ),\n    Keep(\n        source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),\n        target=(COLUMN this: (IDENTIFIER this: a, quoted: False))\n    ),\n]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • source: the source expression.
  • \n
  • target: the target expression against which the diff should be calculated.
  • \n
\n\n
Returns:
\n\n
\n

the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the\n target expression trees. This list represents a sequence of steps needed to transform the source\n expression tree into the target one.

\n
\n", "signature": "(\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.diff.ChangeDistiller": {"fullname": "sqlglot.diff.ChangeDistiller", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller", "kind": "class", "doc": "

The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in\ntheir paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by\nChawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.

\n"}, "sqlglot.diff.ChangeDistiller.__init__": {"fullname": "sqlglot.diff.ChangeDistiller.__init__", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.__init__", "kind": "function", "doc": "

\n", "signature": "(f: float = 0.6, t: float = 0.6)"}, "sqlglot.diff.ChangeDistiller.diff": {"fullname": "sqlglot.diff.ChangeDistiller.diff", "modulename": "sqlglot.diff", "qualname": "ChangeDistiller.diff", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsource: sqlglot.expressions.Expression,\ttarget: sqlglot.expressions.Expression) -> List[Union[sqlglot.diff.Insert, sqlglot.diff.Remove, sqlglot.diff.Move, sqlglot.diff.Update, sqlglot.diff.Keep]]:", "funcdef": "def"}, "sqlglot.errors": {"fullname": "sqlglot.errors", "modulename": "sqlglot.errors", "kind": "module", "doc": "

\n"}, "sqlglot.errors.ErrorLevel": {"fullname": "sqlglot.errors.ErrorLevel", "modulename": "sqlglot.errors", "qualname": "ErrorLevel", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.errors.ErrorLevel.IGNORE": {"fullname": "sqlglot.errors.ErrorLevel.IGNORE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IGNORE", "kind": "variable", "doc": "

Ignore all errors.

\n", "default_value": " = <ErrorLevel.IGNORE: 'IGNORE'>"}, "sqlglot.errors.ErrorLevel.WARN": {"fullname": "sqlglot.errors.ErrorLevel.WARN", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.WARN", "kind": "variable", "doc": "

Log all errors.

\n", "default_value": " = <ErrorLevel.WARN: 'WARN'>"}, "sqlglot.errors.ErrorLevel.RAISE": {"fullname": "sqlglot.errors.ErrorLevel.RAISE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.RAISE", "kind": "variable", "doc": "

Collect all errors and raise a single exception.

\n", "default_value": " = <ErrorLevel.RAISE: 'RAISE'>"}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"fullname": "sqlglot.errors.ErrorLevel.IMMEDIATE", "modulename": "sqlglot.errors", "qualname": "ErrorLevel.IMMEDIATE", "kind": "variable", "doc": "

Immediately raise an exception on the first error found.

\n", "default_value": " = <ErrorLevel.IMMEDIATE: 'IMMEDIATE'>"}, "sqlglot.errors.SqlglotError": {"fullname": "sqlglot.errors.SqlglotError", "modulename": "sqlglot.errors", "qualname": "SqlglotError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "builtins.Exception"}, "sqlglot.errors.UnsupportedError": {"fullname": "sqlglot.errors.UnsupportedError", "modulename": "sqlglot.errors", "qualname": "UnsupportedError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError": {"fullname": "sqlglot.errors.ParseError", "modulename": "sqlglot.errors", "qualname": "ParseError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ParseError.__init__": {"fullname": "sqlglot.errors.ParseError.__init__", "modulename": "sqlglot.errors", "qualname": "ParseError.__init__", "kind": "function", "doc": "

\n", "signature": "(message: str, errors: Optional[List[Dict[str, Any]]] = None)"}, "sqlglot.errors.ParseError.new": {"fullname": "sqlglot.errors.ParseError.new", "modulename": "sqlglot.errors", "qualname": "ParseError.new", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmessage: str,\tdescription: Optional[str] = None,\tline: Optional[int] = None,\tcol: Optional[int] = None,\tstart_context: Optional[str] = None,\thighlight: Optional[str] = None,\tend_context: Optional[str] = None,\tinto_expression: Optional[str] = None) -> sqlglot.errors.ParseError:", "funcdef": "def"}, "sqlglot.errors.TokenError": {"fullname": "sqlglot.errors.TokenError", "modulename": "sqlglot.errors", "qualname": "TokenError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.OptimizeError": {"fullname": "sqlglot.errors.OptimizeError", "modulename": "sqlglot.errors", "qualname": "OptimizeError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.SchemaError": {"fullname": "sqlglot.errors.SchemaError", "modulename": "sqlglot.errors", "qualname": "SchemaError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.ExecuteError": {"fullname": "sqlglot.errors.ExecuteError", "modulename": "sqlglot.errors", "qualname": "ExecuteError", "kind": "class", "doc": "

Common base class for all non-exit exceptions.

\n", "bases": "SqlglotError"}, "sqlglot.errors.concat_messages": {"fullname": "sqlglot.errors.concat_messages", "modulename": "sqlglot.errors", "qualname": "concat_messages", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[Any], maximum: int) -> str:", "funcdef": "def"}, "sqlglot.errors.merge_errors": {"fullname": "sqlglot.errors.merge_errors", "modulename": "sqlglot.errors", "qualname": "merge_errors", "kind": "function", "doc": "

\n", "signature": "(errors: Sequence[sqlglot.errors.ParseError]) -> List[Dict[str, Any]]:", "funcdef": "def"}, "sqlglot.executor": {"fullname": "sqlglot.executor", "modulename": "sqlglot.executor", "kind": "module", "doc": "

Writing a Python SQL engine from scratch

\n\n

Toby Mao

\n\n

Introduction

\n\n

When I first started writing SQLGlot in early 2021, my goal was just to translate SQL queries from SparkSQL to Presto and vice versa. However, over the last year and a half, I've ended up with a full-fledged SQL engine. SQLGlot can now parse and transpile between 18 SQL dialects and can execute all 24 TPC-H SQL queries. The parser and engine are all written from scratch using Python.

\n\n

This post will cover why I went through the effort of creating a Python SQL engine and how a simple query goes from a string to actually transforming data. The following steps are briefly summarized:

\n\n\n\n

Why?

\n\n

I started working on SQLGlot because of my work on the experimentation and metrics platform at Netflix, where I built tools that allowed data scientists to define and compute SQL-based metrics. Netflix relied on multiple engines to query data (Spark, Presto, and Druid), so my team built the metrics platform around PyPika, a Python SQL query builder. This way, definitions could be reused across multiple engines. However, it became quickly apparent that writing python code to programmatically generate SQL was challenging for data scientists, especially those with academic backgrounds, since they were mostly familiar with R and SQL. At the time, the only Python SQL parser was sqlparse, which is not actually a parser but a tokenizer, so having users write raw SQL into the platform wasn't really an option. Some time later, I randomly stumbled across Crafting Interpreters and realized that I could use it as a guide towards creating my own SQL parser/transpiler.

\n\n

Why did I do this? Isn't a Python SQL engine going to be extremely slow?

\n\n

The main reason why I ended up building a SQL engine was...just for entertainment. It's been fun learning about all the things required to actually run a SQL query, and seeing it actually work is extremely rewarding. Before SQLGlot, I had zero experience with lexers, parsers, or compilers.

\n\n

In terms of practical use cases, I planned to use the Python SQL engine for unit testing SQL pipelines. Big data pipelines are tough to test because many of the engines are not open source and cannot be run locally. With SQLGlot, you can take a SQL query targeting a warehouse such as Snowflake and seamlessly run it in CI on mock Python data. It's easy to mock data and create arbitrary UDFs because everything is just Python. Although the implementation is slow and unsuitable for large amounts of data (> 1 million rows), there's very little overhead/startup and you can run queries on test data in a couple of milliseconds.

\n\n

Finally, the components that have been built to support execution can be used as a foundation for a faster engine. I'm inspired by what Apache Calcite has done for the JVM world. Even though Python is commonly used for data, there hasn't been a Calcite for Python. So, you could say that SQLGlot aims to be that framework. For example, it wouldn't take much work to replace the Python execution engine with numpy/pandas/arrow to become a respectably-performing query engine. The implementation would be able to leverage the parser, optimizer, and logical planner, only needing to implement physical execution. There is a lot of work in the Python ecosystem around high performance vectorized computation, which I think could benefit from a pure Python-based AST/plan. Parsing and planning doesn't have to be fast when the bottleneck of running queries is processing terabytes of data. So, having a Python-based ecosystem around SQL is beneficial given the ease of development in Python, despite not having bare metal performance.

\n\n

Parts of SQLGlot's toolkit are being used today by the following:

\n\n
    \n
  • Ibis: A Python library that provides a lightweight, universal interface for data wrangling.\n
      \n
    • Uses the Python SQL expression builder and leverages the optimizer/planner to convert SQL into dataframe operations.
    • \n
  • \n
  • mysql-mimic: Pure-Python implementation of the MySQL server wire protocol\n
      \n
    • Parses / transforms SQL and executes INFORMATION_SCHEMA queries.
    • \n
  • \n
  • Quokka: Push-based vectorized query engine\n
      \n
    • Parses and optimizes SQL.
    • \n
  • \n
  • Splink: Fast, accurate and scalable probabilistic data linkage using your choice of SQL backend.\n
      \n
    • Transpiles queries.
    • \n
  • \n
\n\n

How?

\n\n

There are many steps involved with actually running a simple query like:

\n\n
\n
SELECT\n  bar.a,\n  b + 1 AS b\nFROM bar\nJOIN baz\n  ON bar.a = baz.a\nWHERE bar.a > 1\n
\n
\n\n

In this post, I'll walk through all the steps SQLGlot takes to run this query over Python objects.

\n\n

Tokenizing

\n\n

The first step is to convert the sql string into a list of tokens. SQLGlot's tokenizer is quite simple and can be found here. In a while loop, it checks each character and either appends the character to the current token, or makes a new token.

\n\n

Running the SQLGlot tokenizer shows the output.

\n\n

\"Tokenizer

\n\n

Each keyword has been converted to a SQLGlot Token object. Each token has some metadata associated with it, like line/column information for error messages. Comments are also a part of the token, so that comments can be preserved.

\n\n

Parsing

\n\n

Once a SQL statement is tokenized, we don't need to worry about white space and other formatting, so it's easier to work with. We can now convert the list of tokens into an AST. The SQLGlot parser is a handwritten recursive descent parser.

\n\n

Similar to the tokenizer, it consumes the tokens sequentially, but it instead uses a recursive algorithm. The tokens are converted into a single AST node that represents the SQL query. The SQLGlot parser was designed to support various dialects, so it contains many options for overriding parsing functionality.

\n\n

\"Parser

\n\n

The AST is a generic representation of a given SQL query. Each dialect can override or implement its own generator, which can convert an AST object into syntactically-correct SQL.

\n\n

Optimizing

\n\n

Once we have our AST, we can transform it into an equivalent query that produces the same results more efficiently. When optimizing queries, most engines first convert the AST into a logical plan and then optimize the plan. However, I chose to optimize the AST directly for the following reasons:

\n\n
    \n
  1. It's easier to debug and validate the optimizations when the input and output are both SQL.

  2. \n
  3. Rules can be applied a la carte to transform SQL into a more desirable form.

  4. \n
  5. I wanted a way to generate 'canonical sql'. Having a canonical representation of SQL is useful for understanding if two queries are semantically equivalent (e.g. SELECT 1 + 1 and SELECT 2).

  6. \n
\n\n

I've yet to find another engine that takes this approach, but I'm quite happy with this decision. The optimizer currently does not perform any \"physical optimizations\" such as join reordering. Those are left to the execution layer, as additional statistics and information could become relevant.

\n\n

\"Optimizer

\n\n

The optimizer currently has 17 rules. Each of these rules is applied, transforming the AST in place. The combination of these rules creates \"canonical\" sql that can then be more easily converted into a logical plan and executed.

\n\n

Some example rules are:

\n\n

qualify_tables and qualify_columns

\n\n
    \n
  • Adds all db/catalog qualifiers to tables and forces an alias.
  • \n
  • Ensures each column is unambiguous and expands stars.
  • \n
\n\n
\n
SELECT * FROM x;\n\nSELECT "db"."x" AS "x";\n
\n
\n\n

simplify

\n\n

Boolean and math simplification. Check out all the test cases.

\n\n
\n
((NOT FALSE) AND (x = x)) AND (TRUE OR 1 <> 3);\nx = x;\n\n1 + 1;\n2;\n
\n
\n\n

normalize

\n\n

Attempts to convert all predicates into conjunctive normal form.

\n\n
\n
-- DNF\n(A AND B) OR (B AND C AND D);\n\n-- CNF\n(A OR C) AND (A OR D) AND B;\n
\n
\n\n

unnest_subqueries

\n\n

Converts subqueries in predicates into joins.

\n\n
\n
-- The subquery can be converted into a left join\nSELECT *\nFROM x AS x\nWHERE (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE x.a = y.a\n) = 1;\n\nSELECT *\nFROM x AS x\nLEFT JOIN (\n  SELECT y.a AS a\n  FROM y AS y\n  WHERE TRUE\n  GROUP BY y.a\n) AS "_u_0"\n  ON x.a = "_u_0".a\nWHERE ("_u_0".a = 1 AND NOT "_u_0".a IS NULL)\n
\n
\n\n

pushdown_predicates

\n\n

Push down filters into the innermost query.

\n\n
\n
SELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n) AS y\nWHERE y.a = 1;\n\nSELECT *\nFROM (\n  SELECT *\n  FROM x AS x\n  WHERE y.a = 1\n) AS y WHERE TRUE\n
\n
\n\n

annotate_types

\n\n

Infer all types throughout the AST given schema information and function type definitions.

\n\n

Planning

\n\n

After the SQL AST has been \"optimized\", it's much easier to convert into a logical plan. The AST is traversed and converted into a DAG consisting of one of five steps. The different steps are:

\n\n

Scan

\n\n

Selects columns from a table, applies projections, and finally filters the table.

\n\n

Sort

\n\n

Sorts a table for order by expressions.

\n\n

Set

\n\n

Applies the operators union/union all/except/intersect.

\n\n

Aggregate

\n\n

Applies an aggregation/group by.

\n\n

Join

\n\n

Joins multiple tables together.

\n\n

\"Planner

\n\n

The logical plan is quite simple and contains the information required to convert it into a physical plan (execution).

\n\n

Executing

\n\n

Finally, we can actually execute the SQL query. The Python engine is not fast, but it's very small (~400 LOC)! It iterates the DAG with a queue and runs each step, passing each intermediary table to the next step.

\n\n

In order to keep things simple, it evaluates expressions with eval. Because SQLGlot was built primarily to be a transpiler, it was simple to create a \"Python SQL\" dialect. So a SQL expression x + 1 can just be converted into scope['x'] + 1.

\n\n

\"Executor

\n\n

What's next

\n\n

SQLGlot's main focus will always be on parsing/transpiling, but I plan to continue development on the execution engine. I'd like to pass TPC-DS. If someone doesn't beat me to it, I may even take a stab at writing a Pandas/Arrow execution engine.

\n\n

I'm hoping that over time, SQLGlot will spark the Python SQL ecosystem just like Calcite has for Java.

\n\n

Special thanks

\n\n

SQLGlot would not be what it is without its core contributors. In particular, the execution engine would not exist without Barak Alon and George Sittas.

\n\n

Get in touch

\n\n

If you'd like to chat more about SQLGlot, please join my Slack Channel!

\n\n
\n"}, "sqlglot.executor.execute": {"fullname": "sqlglot.executor.execute", "modulename": "sqlglot.executor", "qualname": "execute", "kind": "function", "doc": "

Run a sql query against data.

\n\n
Arguments:
\n\n
    \n
  • sql: a sql statement.
  • \n
  • schema: database schema.\nThis can either be an instance of Schema or a mapping in one of the following forms:\n
      \n
    1. {table: {col: type}}
    2. \n
    3. {db: {table: {col: type}}}
    4. \n
    5. {catalog: {db: {table: {col: type}}}}
    6. \n
  • \n
  • read: the SQL dialect to apply during parsing (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • tables: additional tables to register.
  • \n
\n\n
Returns:
\n\n
\n

Simple columnar data structure.

\n
\n", "signature": "(\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tread: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\ttables: Optional[Dict] = None) -> sqlglot.executor.table.Table:", "funcdef": "def"}, "sqlglot.executor.context": {"fullname": "sqlglot.executor.context", "modulename": "sqlglot.executor.context", "kind": "module", "doc": "

\n"}, "sqlglot.executor.context.Context": {"fullname": "sqlglot.executor.context.Context", "modulename": "sqlglot.executor.context", "qualname": "Context", "kind": "class", "doc": "

Execution context for sql expressions.

\n\n

Context is used to hold relevant data tables which can then be queried on with eval.

\n\n

References to columns can either be scalar or vectors. When set_row is used, column references\nevaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient\nevaluation of aggregation functions.

\n"}, "sqlglot.executor.context.Context.__init__": {"fullname": "sqlglot.executor.context.Context.__init__", "modulename": "sqlglot.executor.context", "qualname": "Context.__init__", "kind": "function", "doc": "

Args\n tables: representing the scope of the current execution context.\n env: dictionary of functions within the execution context.

\n", "signature": "(\ttables: Dict[str, sqlglot.executor.table.Table],\tenv: Optional[Dict] = None)"}, "sqlglot.executor.context.Context.eval": {"fullname": "sqlglot.executor.context.Context.eval", "modulename": "sqlglot.executor.context", "qualname": "Context.eval", "kind": "function", "doc": "

\n", "signature": "(self, code):", "funcdef": "def"}, "sqlglot.executor.context.Context.eval_tuple": {"fullname": "sqlglot.executor.context.Context.eval_tuple", "modulename": "sqlglot.executor.context", "qualname": "Context.eval_tuple", "kind": "function", "doc": "

\n", "signature": "(self, codes):", "funcdef": "def"}, "sqlglot.executor.context.Context.add_columns": {"fullname": "sqlglot.executor.context.Context.add_columns", "modulename": "sqlglot.executor.context", "qualname": "Context.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.table_iter": {"fullname": "sqlglot.executor.context.Context.table_iter", "modulename": "sqlglot.executor.context", "qualname": "Context.table_iter", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: str) -> Iterator[Tuple[sqlglot.executor.table.TableIter, sqlglot.executor.context.Context]]:", "funcdef": "def"}, "sqlglot.executor.context.Context.filter": {"fullname": "sqlglot.executor.context.Context.filter", "modulename": "sqlglot.executor.context", "qualname": "Context.filter", "kind": "function", "doc": "

\n", "signature": "(self, condition) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.sort": {"fullname": "sqlglot.executor.context.Context.sort", "modulename": "sqlglot.executor.context", "qualname": "Context.sort", "kind": "function", "doc": "

\n", "signature": "(self, key) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_row": {"fullname": "sqlglot.executor.context.Context.set_row", "modulename": "sqlglot.executor.context", "qualname": "Context.set_row", "kind": "function", "doc": "

\n", "signature": "(self, row: Tuple) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_index": {"fullname": "sqlglot.executor.context.Context.set_index", "modulename": "sqlglot.executor.context", "qualname": "Context.set_index", "kind": "function", "doc": "

\n", "signature": "(self, index: int) -> None:", "funcdef": "def"}, "sqlglot.executor.context.Context.set_range": {"fullname": "sqlglot.executor.context.Context.set_range", "modulename": "sqlglot.executor.context", "qualname": "Context.set_range", "kind": "function", "doc": "

\n", "signature": "(self, start: int, end: int) -> None:", "funcdef": "def"}, "sqlglot.executor.env": {"fullname": "sqlglot.executor.env", "modulename": "sqlglot.executor.env", "kind": "module", "doc": "

\n"}, "sqlglot.executor.env.reverse_key": {"fullname": "sqlglot.executor.env.reverse_key", "modulename": "sqlglot.executor.env", "qualname": "reverse_key", "kind": "class", "doc": "

\n"}, "sqlglot.executor.env.reverse_key.__init__": {"fullname": "sqlglot.executor.env.reverse_key.__init__", "modulename": "sqlglot.executor.env", "qualname": "reverse_key.__init__", "kind": "function", "doc": "

\n", "signature": "(obj)"}, "sqlglot.executor.env.filter_nulls": {"fullname": "sqlglot.executor.env.filter_nulls", "modulename": "sqlglot.executor.env", "qualname": "filter_nulls", "kind": "function", "doc": "

\n", "signature": "(func, empty_null=True):", "funcdef": "def"}, "sqlglot.executor.env.null_if_any": {"fullname": "sqlglot.executor.env.null_if_any", "modulename": "sqlglot.executor.env", "qualname": "null_if_any", "kind": "function", "doc": "

Decorator that makes a function return None if any of the required arguments are None.

\n\n

This also supports decoration with no arguments, e.g.:

\n\n
@null_if_any\ndef foo(a, b): ...\n
\n\n

In which case all arguments are required.

\n", "signature": "(*required):", "funcdef": "def"}, "sqlglot.executor.env.str_position": {"fullname": "sqlglot.executor.env.str_position", "modulename": "sqlglot.executor.env", "qualname": "str_position", "kind": "function", "doc": "

\n", "signature": "(substr, this, position=None):", "funcdef": "def"}, "sqlglot.executor.env.substring": {"fullname": "sqlglot.executor.env.substring", "modulename": "sqlglot.executor.env", "qualname": "substring", "kind": "function", "doc": "

\n", "signature": "(this, start=None, length=None):", "funcdef": "def"}, "sqlglot.executor.env.cast": {"fullname": "sqlglot.executor.env.cast", "modulename": "sqlglot.executor.env", "qualname": "cast", "kind": "function", "doc": "

\n", "signature": "(this, to):", "funcdef": "def"}, "sqlglot.executor.env.ordered": {"fullname": "sqlglot.executor.env.ordered", "modulename": "sqlglot.executor.env", "qualname": "ordered", "kind": "function", "doc": "

\n", "signature": "(this, desc, nulls_first):", "funcdef": "def"}, "sqlglot.executor.env.interval": {"fullname": "sqlglot.executor.env.interval", "modulename": "sqlglot.executor.env", "qualname": "interval", "kind": "function", "doc": "

\n", "signature": "(this, unit):", "funcdef": "def"}, "sqlglot.executor.python": {"fullname": "sqlglot.executor.python", "modulename": "sqlglot.executor.python", "kind": "module", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor": {"fullname": "sqlglot.executor.python.PythonExecutor", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor", "kind": "class", "doc": "

\n"}, "sqlglot.executor.python.PythonExecutor.__init__": {"fullname": "sqlglot.executor.python.PythonExecutor.__init__", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.__init__", "kind": "function", "doc": "

\n", "signature": "(env=None, tables=None)"}, "sqlglot.executor.python.PythonExecutor.execute": {"fullname": "sqlglot.executor.python.PythonExecutor.execute", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.execute", "kind": "function", "doc": "

\n", "signature": "(self, plan):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate": {"fullname": "sqlglot.executor.python.PythonExecutor.generate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate", "kind": "function", "doc": "

Convert a SQL expression into literal Python code and compile it into bytecode.

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"fullname": "sqlglot.executor.python.PythonExecutor.generate_tuple", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.generate_tuple", "kind": "function", "doc": "

Convert an array of SQL expressions into tuple of Python byte code.

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.context": {"fullname": "sqlglot.executor.python.PythonExecutor.context", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.context", "kind": "function", "doc": "

\n", "signature": "(self, tables):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.table": {"fullname": "sqlglot.executor.python.PythonExecutor.table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.table", "kind": "function", "doc": "

\n", "signature": "(self, expressions):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan": {"fullname": "sqlglot.executor.python.PythonExecutor.scan", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.static": {"fullname": "sqlglot.executor.python.PythonExecutor.static", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.static", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_table": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_table", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_table", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"fullname": "sqlglot.executor.python.PythonExecutor.scan_csv", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.scan_csv", "kind": "function", "doc": "

\n", "signature": "(self, step):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.join": {"fullname": "sqlglot.executor.python.PythonExecutor.join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.join", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"fullname": "sqlglot.executor.python.PythonExecutor.nested_loop_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.nested_loop_join", "kind": "function", "doc": "

\n", "signature": "(self, _join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.hash_join": {"fullname": "sqlglot.executor.python.PythonExecutor.hash_join", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.hash_join", "kind": "function", "doc": "

\n", "signature": "(self, join, source_context, join_context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.aggregate": {"fullname": "sqlglot.executor.python.PythonExecutor.aggregate", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.aggregate", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.sort": {"fullname": "sqlglot.executor.python.PythonExecutor.sort", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.sort", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.PythonExecutor.set_operation": {"fullname": "sqlglot.executor.python.PythonExecutor.set_operation", "modulename": "sqlglot.executor.python", "qualname": "PythonExecutor.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, step, context):", "funcdef": "def"}, "sqlglot.executor.python.Python": {"fullname": "sqlglot.executor.python.Python", "modulename": "sqlglot.executor.python", "qualname": "Python", "kind": "class", "doc": "

\n", "bases": "sqlglot.dialects.dialect.Dialect"}, "sqlglot.executor.python.Python.__init__": {"fullname": "sqlglot.executor.python.Python.__init__", "modulename": "sqlglot.executor.python", "qualname": "Python.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.executor.python.Python.Tokenizer": {"fullname": "sqlglot.executor.python.Python.Tokenizer", "modulename": "sqlglot.executor.python", "qualname": "Python.Tokenizer", "kind": "class", "doc": "

\n", "bases": "sqlglot.tokens.Tokenizer"}, "sqlglot.executor.python.Python.Generator": {"fullname": "sqlglot.executor.python.Python.Generator", "modulename": "sqlglot.executor.python", "qualname": "Python.Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n", "bases": "sqlglot.generator.Generator"}, "sqlglot.executor.table": {"fullname": "sqlglot.executor.table", "modulename": "sqlglot.executor.table", "kind": "module", "doc": "

\n"}, "sqlglot.executor.table.Table": {"fullname": "sqlglot.executor.table.Table", "modulename": "sqlglot.executor.table", "qualname": "Table", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.Table.__init__": {"fullname": "sqlglot.executor.table.Table.__init__", "modulename": "sqlglot.executor.table", "qualname": "Table.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, rows=None, column_range=None)"}, "sqlglot.executor.table.Table.add_columns": {"fullname": "sqlglot.executor.table.Table.add_columns", "modulename": "sqlglot.executor.table", "qualname": "Table.add_columns", "kind": "function", "doc": "

\n", "signature": "(self, *columns: str) -> None:", "funcdef": "def"}, "sqlglot.executor.table.Table.append": {"fullname": "sqlglot.executor.table.Table.append", "modulename": "sqlglot.executor.table", "qualname": "Table.append", "kind": "function", "doc": "

\n", "signature": "(self, row):", "funcdef": "def"}, "sqlglot.executor.table.Table.pop": {"fullname": "sqlglot.executor.table.Table.pop", "modulename": "sqlglot.executor.table", "qualname": "Table.pop", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.executor.table.TableIter": {"fullname": "sqlglot.executor.table.TableIter", "modulename": "sqlglot.executor.table", "qualname": "TableIter", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.TableIter.__init__": {"fullname": "sqlglot.executor.table.TableIter.__init__", "modulename": "sqlglot.executor.table", "qualname": "TableIter.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RangeReader": {"fullname": "sqlglot.executor.table.RangeReader", "modulename": "sqlglot.executor.table", "qualname": "RangeReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RangeReader.__init__": {"fullname": "sqlglot.executor.table.RangeReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RangeReader.__init__", "kind": "function", "doc": "

\n", "signature": "(table)"}, "sqlglot.executor.table.RowReader": {"fullname": "sqlglot.executor.table.RowReader", "modulename": "sqlglot.executor.table", "qualname": "RowReader", "kind": "class", "doc": "

\n"}, "sqlglot.executor.table.RowReader.__init__": {"fullname": "sqlglot.executor.table.RowReader.__init__", "modulename": "sqlglot.executor.table", "qualname": "RowReader.__init__", "kind": "function", "doc": "

\n", "signature": "(columns, column_range=None)"}, "sqlglot.executor.table.Tables": {"fullname": "sqlglot.executor.table.Tables", "modulename": "sqlglot.executor.table", "qualname": "Tables", "kind": "class", "doc": "

Abstract base class for generic types.

\n\n

A generic type is typically declared by inheriting from\nthis class parameterized with one or more type variables.\nFor example, a generic mapping type might be defined as::

\n\n

class Mapping(Generic[KT, VT]):\n def __getitem__(self, key: KT) -> VT:\n ...\n # Etc.

\n\n

This class can then be used as follows::

\n\n

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:\n try:\n return mapping[key]\n except KeyError:\n return default

\n", "bases": "sqlglot.schema.AbstractMappingSchema[sqlglot.executor.table.Table]"}, "sqlglot.executor.table.ensure_tables": {"fullname": "sqlglot.executor.table.ensure_tables", "modulename": "sqlglot.executor.table", "qualname": "ensure_tables", "kind": "function", "doc": "

\n", "signature": "(d: Optional[Dict]) -> sqlglot.executor.table.Tables:", "funcdef": "def"}, "sqlglot.expressions": {"fullname": "sqlglot.expressions", "modulename": "sqlglot.expressions", "kind": "module", "doc": "

Expressions

\n\n

Every AST node in SQLGlot is represented by a subclass of Expression.

\n\n

This module contains the implementation of all supported Expression types. Additionally,\nit exposes a number of helper functions, which are mainly used to programmatically build\nSQL expressions, such as sqlglot.expressions.select.

\n\n
\n"}, "sqlglot.expressions.Expression": {"fullname": "sqlglot.expressions.Expression", "modulename": "sqlglot.expressions", "qualname": "Expression", "kind": "class", "doc": "

The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary\ncontext, such as its child expressions, their names (arg keys), and whether a given child expression\nis optional or not.

\n\n
Attributes:
\n\n
    \n
  • key: a unique key for each class in the Expression hierarchy. This is useful for hashing\nand representing expressions as strings.
  • \n
  • arg_types: determines what arguments (child nodes) are supported by an expression. It\nmaps arg keys to booleans that indicate whether the corresponding args are optional.
  • \n
\n\n
Example:
\n\n
\n
\n
>>> class Foo(Expression):\n...     arg_types = {"this": True, "expression": False}\n
\n
\n \n

The above definition informs us that Foo is an Expression that requires an argument called\n \"this\" and may also optionally receive an argument called \"expression\".

\n
\n\n
Arguments:
\n\n
    \n
  • args: a mapping used for retrieving the arguments of an expression, given their arg keys.
  • \n
  • parent: a reference to the parent expression (or None, in case of root expressions).
  • \n
  • arg_key: the arg key an expression is associated with, i.e. the name its parent expression\nuses to refer to it.
  • \n
  • comments: a list of comments that are associated with a given expression. This is used in\norder to preserve comments when transpiling SQL code.
  • \n
  • _type: the sqlglot.expressions.DataType type of an expression. This is inferred by the\noptimizer, in order to enable some transformations that require type information.
  • \n
\n"}, "sqlglot.expressions.Expression.__init__": {"fullname": "sqlglot.expressions.Expression.__init__", "modulename": "sqlglot.expressions", "qualname": "Expression.__init__", "kind": "function", "doc": "

\n", "signature": "(**args: Any)"}, "sqlglot.expressions.Expression.this": {"fullname": "sqlglot.expressions.Expression.this", "modulename": "sqlglot.expressions", "qualname": "Expression.this", "kind": "variable", "doc": "

Retrieves the argument with key \"this\".

\n"}, "sqlglot.expressions.Expression.expression": {"fullname": "sqlglot.expressions.Expression.expression", "modulename": "sqlglot.expressions", "qualname": "Expression.expression", "kind": "variable", "doc": "

Retrieves the argument with key \"expression\".

\n"}, "sqlglot.expressions.Expression.expressions": {"fullname": "sqlglot.expressions.Expression.expressions", "modulename": "sqlglot.expressions", "qualname": "Expression.expressions", "kind": "variable", "doc": "

Retrieves the argument with key \"expressions\".

\n"}, "sqlglot.expressions.Expression.text": {"fullname": "sqlglot.expressions.Expression.text", "modulename": "sqlglot.expressions", "qualname": "Expression.text", "kind": "function", "doc": "

Returns a textual representation of the argument corresponding to \"key\". This can only be used\nfor args that are strings or leaf Expression instances, such as identifiers and literals.

\n", "signature": "(self, key):", "funcdef": "def"}, "sqlglot.expressions.Expression.is_string": {"fullname": "sqlglot.expressions.Expression.is_string", "modulename": "sqlglot.expressions", "qualname": "Expression.is_string", "kind": "variable", "doc": "

Checks whether a Literal expression is a string.

\n"}, "sqlglot.expressions.Expression.is_number": {"fullname": "sqlglot.expressions.Expression.is_number", "modulename": "sqlglot.expressions", "qualname": "Expression.is_number", "kind": "variable", "doc": "

Checks whether a Literal expression is a number.

\n"}, "sqlglot.expressions.Expression.is_int": {"fullname": "sqlglot.expressions.Expression.is_int", "modulename": "sqlglot.expressions", "qualname": "Expression.is_int", "kind": "variable", "doc": "

Checks whether a Literal expression is an integer.

\n"}, "sqlglot.expressions.Expression.alias": {"fullname": "sqlglot.expressions.Expression.alias", "modulename": "sqlglot.expressions", "qualname": "Expression.alias", "kind": "variable", "doc": "

Returns the alias of the expression, or an empty string if it's not aliased.

\n"}, "sqlglot.expressions.Expression.output_name": {"fullname": "sqlglot.expressions.Expression.output_name", "modulename": "sqlglot.expressions", "qualname": "Expression.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Expression.copy": {"fullname": "sqlglot.expressions.Expression.copy", "modulename": "sqlglot.expressions", "qualname": "Expression.copy", "kind": "function", "doc": "

Returns a deep copy of the expression.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.append": {"fullname": "sqlglot.expressions.Expression.append", "modulename": "sqlglot.expressions", "qualname": "Expression.append", "kind": "function", "doc": "

Appends value to arg_key if it's a list or sets it as a new list.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the list expression arg
  • \n
  • value (Any): value to append to the list
  • \n
\n", "signature": "(self, arg_key, value):", "funcdef": "def"}, "sqlglot.expressions.Expression.set": {"fullname": "sqlglot.expressions.Expression.set", "modulename": "sqlglot.expressions", "qualname": "Expression.set", "kind": "function", "doc": "

Sets arg_key to value.

\n\n
Arguments:
\n\n
    \n
  • arg_key (str): name of the expression arg.
  • \n
  • value: value to set the arg to.
  • \n
\n", "signature": "(self, arg_key, value):", "funcdef": "def"}, "sqlglot.expressions.Expression.depth": {"fullname": "sqlglot.expressions.Expression.depth", "modulename": "sqlglot.expressions", "qualname": "Expression.depth", "kind": "variable", "doc": "

Returns the depth of this tree.

\n"}, "sqlglot.expressions.Expression.find": {"fullname": "sqlglot.expressions.Expression.find", "modulename": "sqlglot.expressions", "qualname": "Expression.find", "kind": "function", "doc": "

Returns the first node in this tree which matches at least one of\nthe specified types.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
\n\n
Returns:
\n\n
\n

The node which matches the criteria or None if no such node was found.

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.expressions.Expression.find_all": {"fullname": "sqlglot.expressions.Expression.find_all", "modulename": "sqlglot.expressions", "qualname": "Expression.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree and only\nyields those that match at least one of the specified expression types.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.expressions.Expression.find_ancestor": {"fullname": "sqlglot.expressions.Expression.find_ancestor", "modulename": "sqlglot.expressions", "qualname": "Expression.find_ancestor", "kind": "function", "doc": "

Returns a nearest parent matching expression_types.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
\n\n
Returns:
\n\n
\n

The parent node.

\n
\n", "signature": "(self, *expression_types):", "funcdef": "def"}, "sqlglot.expressions.Expression.parent_select": {"fullname": "sqlglot.expressions.Expression.parent_select", "modulename": "sqlglot.expressions", "qualname": "Expression.parent_select", "kind": "variable", "doc": "

Returns the parent select statement.

\n"}, "sqlglot.expressions.Expression.walk": {"fullname": "sqlglot.expressions.Expression.walk", "modulename": "sqlglot.expressions", "qualname": "Expression.walk", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree.

\n\n
Arguments:
\n\n
    \n
  • bfs (bool): if set to True the BFS traversal order will be applied,\notherwise the DFS traversal will be used instead.
  • \n
  • prune ((node, parent, arg_key) -> bool): callable that returns True if\nthe generator should stop traversing this branch of the tree.
  • \n
\n\n
Returns:
\n\n
\n

the generator object.

\n
\n", "signature": "(self, bfs=True, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.dfs": {"fullname": "sqlglot.expressions.Expression.dfs", "modulename": "sqlglot.expressions", "qualname": "Expression.dfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe DFS (Depth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, parent=None, key=None, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.bfs": {"fullname": "sqlglot.expressions.Expression.bfs", "modulename": "sqlglot.expressions", "qualname": "Expression.bfs", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this tree in\nthe BFS (Breadth-first) order.

\n\n
Returns:
\n\n
\n

The generator object.

\n
\n", "signature": "(self, prune=None):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest": {"fullname": "sqlglot.expressions.Expression.unnest", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest", "kind": "function", "doc": "

Returns the first non parenthesis child or self.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unalias": {"fullname": "sqlglot.expressions.Expression.unalias", "modulename": "sqlglot.expressions", "qualname": "Expression.unalias", "kind": "function", "doc": "

Returns the inner expression if this is an Alias.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.unnest_operands": {"fullname": "sqlglot.expressions.Expression.unnest_operands", "modulename": "sqlglot.expressions", "qualname": "Expression.unnest_operands", "kind": "function", "doc": "

Returns unnested operands as a tuple.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.flatten": {"fullname": "sqlglot.expressions.Expression.flatten", "modulename": "sqlglot.expressions", "qualname": "Expression.flatten", "kind": "function", "doc": "

Returns a generator which yields child nodes who's parents are the same class.

\n\n

A AND B AND C -> [A, B, C]

\n", "signature": "(self, unnest=True):", "funcdef": "def"}, "sqlglot.expressions.Expression.sql": {"fullname": "sqlglot.expressions.Expression.sql", "modulename": "sqlglot.expressions", "qualname": "Expression.sql", "kind": "function", "doc": "

Returns SQL string representation of this tree.

\n\n
Arguments:
\n\n
    \n
  • dialect: the dialect of the output SQL string (eg. \"spark\", \"hive\", \"presto\", \"mysql\").
  • \n
  • opts: other sqlglot.generator.Generator options.
  • \n
\n\n
Returns:
\n\n
\n

The SQL string.

\n
\n", "signature": "(\tself,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts) -> str:", "funcdef": "def"}, "sqlglot.expressions.Expression.transform": {"fullname": "sqlglot.expressions.Expression.transform", "modulename": "sqlglot.expressions", "qualname": "Expression.transform", "kind": "function", "doc": "

Recursively visits all tree nodes (excluding already transformed ones)\nand applies the given transformation function to each node.

\n\n
Arguments:
\n\n
    \n
  • fun (function): a function which takes a node as an argument and returns a\nnew transformed node or the same node without modifications. If the function\nreturns None, then the corresponding node will be removed from the syntax tree.
  • \n
  • copy (bool): if set to True a new tree instance is constructed, otherwise the tree is\nmodified in place.
  • \n
\n\n
Returns:
\n\n
\n

The transformed tree.

\n
\n", "signature": "(self, fun, *args, copy=True, **kwargs):", "funcdef": "def"}, "sqlglot.expressions.Expression.replace": {"fullname": "sqlglot.expressions.Expression.replace", "modulename": "sqlglot.expressions", "qualname": "Expression.replace", "kind": "function", "doc": "

Swap out this expression with a new expression.

\n\n

For example::

\n\n
>>> tree = Select().select(\"x\").from_(\"tbl\")\n>>> tree.find(Column).replace(Column(this=\"y\"))\n(COLUMN this: y)\n>>> tree.sql()\n'SELECT y FROM tbl'\n
\n\n
Arguments:
\n\n
    \n
  • expression (Expression|None): new node
  • \n
\n\n
Returns:
\n\n
\n

The new expression or expressions.

\n
\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.expressions.Expression.pop": {"fullname": "sqlglot.expressions.Expression.pop", "modulename": "sqlglot.expressions", "qualname": "Expression.pop", "kind": "function", "doc": "

Remove this expression from its AST.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.assert_is": {"fullname": "sqlglot.expressions.Expression.assert_is", "modulename": "sqlglot.expressions", "qualname": "Expression.assert_is", "kind": "function", "doc": "

Assert that this Expression is an instance of type_.

\n\n

If it is NOT an instance of type_, this raises an assertion error.\nOtherwise, this returns this expression.

\n\n
Examples:
\n\n
\n

This is useful for type security in chained expressions:

\n \n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()\n'SELECT x, z FROM y'\n
\n
\n
\n", "signature": "(self, type_):", "funcdef": "def"}, "sqlglot.expressions.Expression.error_messages": {"fullname": "sqlglot.expressions.Expression.error_messages", "modulename": "sqlglot.expressions", "qualname": "Expression.error_messages", "kind": "function", "doc": "

Checks if this expression is valid (e.g. all mandatory args are set).

\n\n
Arguments:
\n\n
    \n
  • args: a sequence of values that were used to instantiate a Func expression. This is used\nto check that the provided arguments don't exceed the function argument limit.
  • \n
\n\n
Returns:
\n\n
\n

A list of error messages for all possible errors that were found.

\n
\n", "signature": "(self, args: Optional[Sequence] = None) -> List[str]:", "funcdef": "def"}, "sqlglot.expressions.Expression.dump": {"fullname": "sqlglot.expressions.Expression.dump", "modulename": "sqlglot.expressions", "qualname": "Expression.dump", "kind": "function", "doc": "

Dump this Expression to a JSON-serializable dict.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Expression.load": {"fullname": "sqlglot.expressions.Expression.load", "modulename": "sqlglot.expressions", "qualname": "Expression.load", "kind": "function", "doc": "

Load a dict (as returned by Expression.dump) into an Expression instance.

\n", "signature": "(cls, obj):", "funcdef": "def"}, "sqlglot.expressions.Condition": {"fullname": "sqlglot.expressions.Condition", "modulename": "sqlglot.expressions", "qualname": "Condition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Condition.and_": {"fullname": "sqlglot.expressions.Condition.and_", "modulename": "sqlglot.expressions", "qualname": "Condition.and_", "kind": "function", "doc": "

AND this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").and_("y=1").sql()\n'x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

And: the new condition.

\n
\n", "signature": "(self, *expressions, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.Condition.or_": {"fullname": "sqlglot.expressions.Condition.or_", "modulename": "sqlglot.expressions", "qualname": "Condition.or_", "kind": "function", "doc": "

OR this condition with one or multiple expressions.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").or_("y=1").sql()\n'x = 1 OR y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Or: the new condition.

\n
\n", "signature": "(self, *expressions, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.Condition.not_": {"fullname": "sqlglot.expressions.Condition.not_", "modulename": "sqlglot.expressions", "qualname": "Condition.not_", "kind": "function", "doc": "

Wrap this condition with NOT.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").not_().sql()\n'NOT x = 1'\n
\n
\n
\n\n
Returns:
\n\n
\n

Not: the new condition.

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Predicate": {"fullname": "sqlglot.expressions.Predicate", "modulename": "sqlglot.expressions", "qualname": "Predicate", "kind": "class", "doc": "

Relationships like x = y, x > 1, x >= y.

\n", "bases": "Condition"}, "sqlglot.expressions.DerivedTable": {"fullname": "sqlglot.expressions.DerivedTable", "modulename": "sqlglot.expressions", "qualname": "DerivedTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable": {"fullname": "sqlglot.expressions.Unionable", "modulename": "sqlglot.expressions", "qualname": "Unionable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unionable.union": {"fullname": "sqlglot.expressions.Unionable.union", "modulename": "sqlglot.expressions", "qualname": "Unionable.union", "kind": "function", "doc": "

Builds a UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Union: the Union expression.

\n
\n", "signature": "(self, expression, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.Unionable.intersect": {"fullname": "sqlglot.expressions.Unionable.intersect", "modulename": "sqlglot.expressions", "qualname": "Unionable.intersect", "kind": "function", "doc": "

Builds an INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Intersect: the Intersect expression

\n
\n", "signature": "(self, expression, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.Unionable.except_": {"fullname": "sqlglot.expressions.Unionable.except_", "modulename": "sqlglot.expressions", "qualname": "Unionable.except_", "kind": "function", "doc": "

Builds an EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Except: the Except expression

\n
\n", "signature": "(self, expression, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.UDTF": {"fullname": "sqlglot.expressions.UDTF", "modulename": "sqlglot.expressions", "qualname": "UDTF", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Cache": {"fullname": "sqlglot.expressions.Cache", "modulename": "sqlglot.expressions", "qualname": "Cache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Uncache": {"fullname": "sqlglot.expressions.Uncache", "modulename": "sqlglot.expressions", "qualname": "Uncache", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Create": {"fullname": "sqlglot.expressions.Create", "modulename": "sqlglot.expressions", "qualname": "Create", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Describe": {"fullname": "sqlglot.expressions.Describe", "modulename": "sqlglot.expressions", "qualname": "Describe", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Set": {"fullname": "sqlglot.expressions.Set", "modulename": "sqlglot.expressions", "qualname": "Set", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SetItem": {"fullname": "sqlglot.expressions.SetItem", "modulename": "sqlglot.expressions", "qualname": "SetItem", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Show": {"fullname": "sqlglot.expressions.Show", "modulename": "sqlglot.expressions", "qualname": "Show", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.UserDefinedFunction": {"fullname": "sqlglot.expressions.UserDefinedFunction", "modulename": "sqlglot.expressions", "qualname": "UserDefinedFunction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"fullname": "sqlglot.expressions.UserDefinedFunctionKwarg", "modulename": "sqlglot.expressions", "qualname": "UserDefinedFunctionKwarg", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CharacterSet": {"fullname": "sqlglot.expressions.CharacterSet", "modulename": "sqlglot.expressions", "qualname": "CharacterSet", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.With": {"fullname": "sqlglot.expressions.With", "modulename": "sqlglot.expressions", "qualname": "With", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WithinGroup": {"fullname": "sqlglot.expressions.WithinGroup", "modulename": "sqlglot.expressions", "qualname": "WithinGroup", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.CTE": {"fullname": "sqlglot.expressions.CTE", "modulename": "sqlglot.expressions", "qualname": "CTE", "kind": "class", "doc": "

\n", "bases": "DerivedTable"}, "sqlglot.expressions.TableAlias": {"fullname": "sqlglot.expressions.TableAlias", "modulename": "sqlglot.expressions", "qualname": "TableAlias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.BitString": {"fullname": "sqlglot.expressions.BitString", "modulename": "sqlglot.expressions", "qualname": "BitString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.HexString": {"fullname": "sqlglot.expressions.HexString", "modulename": "sqlglot.expressions", "qualname": "HexString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.ByteString": {"fullname": "sqlglot.expressions.ByteString", "modulename": "sqlglot.expressions", "qualname": "ByteString", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column": {"fullname": "sqlglot.expressions.Column", "modulename": "sqlglot.expressions", "qualname": "Column", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Column.output_name": {"fullname": "sqlglot.expressions.Column.output_name", "modulename": "sqlglot.expressions", "qualname": "Column.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.ColumnDef": {"fullname": "sqlglot.expressions.ColumnDef", "modulename": "sqlglot.expressions", "qualname": "ColumnDef", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterColumn": {"fullname": "sqlglot.expressions.AlterColumn", "modulename": "sqlglot.expressions", "qualname": "AlterColumn", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RenameTable": {"fullname": "sqlglot.expressions.RenameTable", "modulename": "sqlglot.expressions", "qualname": "RenameTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraint": {"fullname": "sqlglot.expressions.ColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ColumnConstraintKind": {"fullname": "sqlglot.expressions.ColumnConstraintKind", "modulename": "sqlglot.expressions", "qualname": "ColumnConstraintKind", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"fullname": "sqlglot.expressions.AutoIncrementColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CheckColumnConstraint": {"fullname": "sqlglot.expressions.CheckColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CheckColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CollateColumnConstraint": {"fullname": "sqlglot.expressions.CollateColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CollateColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.CommentColumnConstraint": {"fullname": "sqlglot.expressions.CommentColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "CommentColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.DefaultColumnConstraint": {"fullname": "sqlglot.expressions.DefaultColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "DefaultColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.EncodeColumnConstraint": {"fullname": "sqlglot.expressions.EncodeColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "EncodeColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"fullname": "sqlglot.expressions.GeneratedAsIdentityColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "GeneratedAsIdentityColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.NotNullColumnConstraint": {"fullname": "sqlglot.expressions.NotNullColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "NotNullColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"fullname": "sqlglot.expressions.PrimaryKeyColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "PrimaryKeyColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.UniqueColumnConstraint": {"fullname": "sqlglot.expressions.UniqueColumnConstraint", "modulename": "sqlglot.expressions", "qualname": "UniqueColumnConstraint", "kind": "class", "doc": "

\n", "bases": "ColumnConstraintKind"}, "sqlglot.expressions.Constraint": {"fullname": "sqlglot.expressions.Constraint", "modulename": "sqlglot.expressions", "qualname": "Constraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Delete": {"fullname": "sqlglot.expressions.Delete", "modulename": "sqlglot.expressions", "qualname": "Delete", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Drop": {"fullname": "sqlglot.expressions.Drop", "modulename": "sqlglot.expressions", "qualname": "Drop", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Filter": {"fullname": "sqlglot.expressions.Filter", "modulename": "sqlglot.expressions", "qualname": "Filter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Check": {"fullname": "sqlglot.expressions.Check", "modulename": "sqlglot.expressions", "qualname": "Check", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Directory": {"fullname": "sqlglot.expressions.Directory", "modulename": "sqlglot.expressions", "qualname": "Directory", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.ForeignKey": {"fullname": "sqlglot.expressions.ForeignKey", "modulename": "sqlglot.expressions", "qualname": "ForeignKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.PrimaryKey": {"fullname": "sqlglot.expressions.PrimaryKey", "modulename": "sqlglot.expressions", "qualname": "PrimaryKey", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Unique": {"fullname": "sqlglot.expressions.Unique", "modulename": "sqlglot.expressions", "qualname": "Unique", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Into": {"fullname": "sqlglot.expressions.Into", "modulename": "sqlglot.expressions", "qualname": "Into", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.From": {"fullname": "sqlglot.expressions.From", "modulename": "sqlglot.expressions", "qualname": "From", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Having": {"fullname": "sqlglot.expressions.Having", "modulename": "sqlglot.expressions", "qualname": "Having", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Hint": {"fullname": "sqlglot.expressions.Hint", "modulename": "sqlglot.expressions", "qualname": "Hint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.JoinHint": {"fullname": "sqlglot.expressions.JoinHint", "modulename": "sqlglot.expressions", "qualname": "JoinHint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier": {"fullname": "sqlglot.expressions.Identifier", "modulename": "sqlglot.expressions", "qualname": "Identifier", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Identifier.output_name": {"fullname": "sqlglot.expressions.Identifier.output_name", "modulename": "sqlglot.expressions", "qualname": "Identifier.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Index": {"fullname": "sqlglot.expressions.Index", "modulename": "sqlglot.expressions", "qualname": "Index", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Insert": {"fullname": "sqlglot.expressions.Insert", "modulename": "sqlglot.expressions", "qualname": "Insert", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Introducer": {"fullname": "sqlglot.expressions.Introducer", "modulename": "sqlglot.expressions", "qualname": "Introducer", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.National": {"fullname": "sqlglot.expressions.National", "modulename": "sqlglot.expressions", "qualname": "National", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.LoadData": {"fullname": "sqlglot.expressions.LoadData", "modulename": "sqlglot.expressions", "qualname": "LoadData", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Partition": {"fullname": "sqlglot.expressions.Partition", "modulename": "sqlglot.expressions", "qualname": "Partition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Fetch": {"fullname": "sqlglot.expressions.Fetch", "modulename": "sqlglot.expressions", "qualname": "Fetch", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Group": {"fullname": "sqlglot.expressions.Group", "modulename": "sqlglot.expressions", "qualname": "Group", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lambda": {"fullname": "sqlglot.expressions.Lambda", "modulename": "sqlglot.expressions", "qualname": "Lambda", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Limit": {"fullname": "sqlglot.expressions.Limit", "modulename": "sqlglot.expressions", "qualname": "Limit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Literal": {"fullname": "sqlglot.expressions.Literal", "modulename": "sqlglot.expressions", "qualname": "Literal", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Literal.number": {"fullname": "sqlglot.expressions.Literal.number", "modulename": "sqlglot.expressions", "qualname": "Literal.number", "kind": "function", "doc": "

\n", "signature": "(cls, number) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.string": {"fullname": "sqlglot.expressions.Literal.string", "modulename": "sqlglot.expressions", "qualname": "Literal.string", "kind": "function", "doc": "

\n", "signature": "(cls, string) -> sqlglot.expressions.Literal:", "funcdef": "def"}, "sqlglot.expressions.Literal.output_name": {"fullname": "sqlglot.expressions.Literal.output_name", "modulename": "sqlglot.expressions", "qualname": "Literal.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Join": {"fullname": "sqlglot.expressions.Join", "modulename": "sqlglot.expressions", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Join.on": {"fullname": "sqlglot.expressions.Join.on", "modulename": "sqlglot.expressions", "qualname": "Join.on", "kind": "function", "doc": "

Append to or set the ON expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()\n'JOIN x ON y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append (bool): if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Join: the modified join expression.

\n
\n", "signature": "(self, *expressions, append=True, dialect=None, copy=True, **opts):", "funcdef": "def"}, "sqlglot.expressions.Join.using": {"fullname": "sqlglot.expressions.Join.using", "modulename": "sqlglot.expressions", "qualname": "Join.using", "kind": "function", "doc": "

Append to or set the USING expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()\n'JOIN x USING (foo, bla)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append (bool): if True, concatenate the new expressions to the existing \"using\" list.\nOtherwise, this resets the expression.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Join: the modified join expression.

\n
\n", "signature": "(self, *expressions, append=True, dialect=None, copy=True, **opts):", "funcdef": "def"}, "sqlglot.expressions.Lateral": {"fullname": "sqlglot.expressions.Lateral", "modulename": "sqlglot.expressions", "qualname": "Lateral", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.MatchRecognize": {"fullname": "sqlglot.expressions.MatchRecognize", "modulename": "sqlglot.expressions", "qualname": "MatchRecognize", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Final": {"fullname": "sqlglot.expressions.Final", "modulename": "sqlglot.expressions", "qualname": "Final", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Offset": {"fullname": "sqlglot.expressions.Offset", "modulename": "sqlglot.expressions", "qualname": "Offset", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Order": {"fullname": "sqlglot.expressions.Order", "modulename": "sqlglot.expressions", "qualname": "Order", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Cluster": {"fullname": "sqlglot.expressions.Cluster", "modulename": "sqlglot.expressions", "qualname": "Cluster", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Distribute": {"fullname": "sqlglot.expressions.Distribute", "modulename": "sqlglot.expressions", "qualname": "Distribute", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Sort": {"fullname": "sqlglot.expressions.Sort", "modulename": "sqlglot.expressions", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Order"}, "sqlglot.expressions.Ordered": {"fullname": "sqlglot.expressions.Ordered", "modulename": "sqlglot.expressions", "qualname": "Ordered", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Property": {"fullname": "sqlglot.expressions.Property", "modulename": "sqlglot.expressions", "qualname": "Property", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlgorithmProperty": {"fullname": "sqlglot.expressions.AlgorithmProperty", "modulename": "sqlglot.expressions", "qualname": "AlgorithmProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DefinerProperty": {"fullname": "sqlglot.expressions.DefinerProperty", "modulename": "sqlglot.expressions", "qualname": "DefinerProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SqlSecurityProperty": {"fullname": "sqlglot.expressions.SqlSecurityProperty", "modulename": "sqlglot.expressions", "qualname": "SqlSecurityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.TableFormatProperty": {"fullname": "sqlglot.expressions.TableFormatProperty", "modulename": "sqlglot.expressions", "qualname": "TableFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.PartitionedByProperty": {"fullname": "sqlglot.expressions.PartitionedByProperty", "modulename": "sqlglot.expressions", "qualname": "PartitionedByProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FileFormatProperty": {"fullname": "sqlglot.expressions.FileFormatProperty", "modulename": "sqlglot.expressions", "qualname": "FileFormatProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistKeyProperty": {"fullname": "sqlglot.expressions.DistKeyProperty", "modulename": "sqlglot.expressions", "qualname": "DistKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SortKeyProperty": {"fullname": "sqlglot.expressions.SortKeyProperty", "modulename": "sqlglot.expressions", "qualname": "SortKeyProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DistStyleProperty": {"fullname": "sqlglot.expressions.DistStyleProperty", "modulename": "sqlglot.expressions", "qualname": "DistStyleProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LikeProperty": {"fullname": "sqlglot.expressions.LikeProperty", "modulename": "sqlglot.expressions", "qualname": "LikeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LocationProperty": {"fullname": "sqlglot.expressions.LocationProperty", "modulename": "sqlglot.expressions", "qualname": "LocationProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.EngineProperty": {"fullname": "sqlglot.expressions.EngineProperty", "modulename": "sqlglot.expressions", "qualname": "EngineProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.AutoIncrementProperty": {"fullname": "sqlglot.expressions.AutoIncrementProperty", "modulename": "sqlglot.expressions", "qualname": "AutoIncrementProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CharacterSetProperty": {"fullname": "sqlglot.expressions.CharacterSetProperty", "modulename": "sqlglot.expressions", "qualname": "CharacterSetProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.CollateProperty": {"fullname": "sqlglot.expressions.CollateProperty", "modulename": "sqlglot.expressions", "qualname": "CollateProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SchemaCommentProperty": {"fullname": "sqlglot.expressions.SchemaCommentProperty", "modulename": "sqlglot.expressions", "qualname": "SchemaCommentProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ReturnsProperty": {"fullname": "sqlglot.expressions.ReturnsProperty", "modulename": "sqlglot.expressions", "qualname": "ReturnsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LanguageProperty": {"fullname": "sqlglot.expressions.LanguageProperty", "modulename": "sqlglot.expressions", "qualname": "LanguageProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ExecuteAsProperty": {"fullname": "sqlglot.expressions.ExecuteAsProperty", "modulename": "sqlglot.expressions", "qualname": "ExecuteAsProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.VolatilityProperty": {"fullname": "sqlglot.expressions.VolatilityProperty", "modulename": "sqlglot.expressions", "qualname": "VolatilityProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatDelimitedProperty": {"fullname": "sqlglot.expressions.RowFormatDelimitedProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatDelimitedProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.RowFormatSerdeProperty": {"fullname": "sqlglot.expressions.RowFormatSerdeProperty", "modulename": "sqlglot.expressions", "qualname": "RowFormatSerdeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.SerdeProperties": {"fullname": "sqlglot.expressions.SerdeProperties", "modulename": "sqlglot.expressions", "qualname": "SerdeProperties", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FallbackProperty": {"fullname": "sqlglot.expressions.FallbackProperty", "modulename": "sqlglot.expressions", "qualname": "FallbackProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.WithJournalTableProperty": {"fullname": "sqlglot.expressions.WithJournalTableProperty", "modulename": "sqlglot.expressions", "qualname": "WithJournalTableProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.LogProperty": {"fullname": "sqlglot.expressions.LogProperty", "modulename": "sqlglot.expressions", "qualname": "LogProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.JournalProperty": {"fullname": "sqlglot.expressions.JournalProperty", "modulename": "sqlglot.expressions", "qualname": "JournalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.AfterJournalProperty": {"fullname": "sqlglot.expressions.AfterJournalProperty", "modulename": "sqlglot.expressions", "qualname": "AfterJournalProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.ChecksumProperty": {"fullname": "sqlglot.expressions.ChecksumProperty", "modulename": "sqlglot.expressions", "qualname": "ChecksumProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.FreespaceProperty": {"fullname": "sqlglot.expressions.FreespaceProperty", "modulename": "sqlglot.expressions", "qualname": "FreespaceProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.MergeBlockRatioProperty": {"fullname": "sqlglot.expressions.MergeBlockRatioProperty", "modulename": "sqlglot.expressions", "qualname": "MergeBlockRatioProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.DataBlocksizeProperty": {"fullname": "sqlglot.expressions.DataBlocksizeProperty", "modulename": "sqlglot.expressions", "qualname": "DataBlocksizeProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.BlockCompressionProperty": {"fullname": "sqlglot.expressions.BlockCompressionProperty", "modulename": "sqlglot.expressions", "qualname": "BlockCompressionProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.IsolatedLoadingProperty": {"fullname": "sqlglot.expressions.IsolatedLoadingProperty", "modulename": "sqlglot.expressions", "qualname": "IsolatedLoadingProperty", "kind": "class", "doc": "

\n", "bases": "Property"}, "sqlglot.expressions.Properties": {"fullname": "sqlglot.expressions.Properties", "modulename": "sqlglot.expressions", "qualname": "Properties", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Properties.Location": {"fullname": "sqlglot.expressions.Properties.Location", "modulename": "sqlglot.expressions", "qualname": "Properties.Location", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"fullname": "sqlglot.expressions.Properties.Location.POST_CREATE", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_CREATE", "kind": "variable", "doc": "

\n", "default_value": " = <Location.POST_CREATE: 'POST_CREATE'>"}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"fullname": "sqlglot.expressions.Properties.Location.PRE_SCHEMA", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.PRE_SCHEMA", "kind": "variable", "doc": "

\n", "default_value": " = <Location.PRE_SCHEMA: 'PRE_SCHEMA'>"}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"fullname": "sqlglot.expressions.Properties.Location.POST_INDEX", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_INDEX", "kind": "variable", "doc": "

\n", "default_value": " = <Location.POST_INDEX: 'POST_INDEX'>"}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"fullname": "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_SCHEMA_ROOT", "kind": "variable", "doc": "

\n", "default_value": " = <Location.POST_SCHEMA_ROOT: 'POST_SCHEMA_ROOT'>"}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"fullname": "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.POST_SCHEMA_WITH", "kind": "variable", "doc": "

\n", "default_value": " = <Location.POST_SCHEMA_WITH: 'POST_SCHEMA_WITH'>"}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"fullname": "sqlglot.expressions.Properties.Location.UNSUPPORTED", "modulename": "sqlglot.expressions", "qualname": "Properties.Location.UNSUPPORTED", "kind": "variable", "doc": "

\n", "default_value": " = <Location.UNSUPPORTED: 'UNSUPPORTED'>"}, "sqlglot.expressions.Properties.from_dict": {"fullname": "sqlglot.expressions.Properties.from_dict", "modulename": "sqlglot.expressions", "qualname": "Properties.from_dict", "kind": "function", "doc": "

\n", "signature": "(cls, properties_dict) -> sqlglot.expressions.Properties:", "funcdef": "def"}, "sqlglot.expressions.Qualify": {"fullname": "sqlglot.expressions.Qualify", "modulename": "sqlglot.expressions", "qualname": "Qualify", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Return": {"fullname": "sqlglot.expressions.Return", "modulename": "sqlglot.expressions", "qualname": "Return", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Reference": {"fullname": "sqlglot.expressions.Reference", "modulename": "sqlglot.expressions", "qualname": "Reference", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tuple": {"fullname": "sqlglot.expressions.Tuple", "modulename": "sqlglot.expressions", "qualname": "Tuple", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Subqueryable": {"fullname": "sqlglot.expressions.Subqueryable", "modulename": "sqlglot.expressions", "qualname": "Subqueryable", "kind": "class", "doc": "

\n", "bases": "Unionable"}, "sqlglot.expressions.Subqueryable.subquery": {"fullname": "sqlglot.expressions.Subqueryable.subquery", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.subquery", "kind": "function", "doc": "

Convert this expression to an aliased expression that can be used as a Subquery.

\n\n
Example:
\n\n
\n
\n
>>> subquery = Select().select("x").from_("tbl").subquery()\n>>> Select().select("x").from_(subquery).sql()\n'SELECT x FROM (SELECT x FROM tbl)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • alias (str | Identifier): an optional alias for the subquery
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

Alias: the subquery

\n
\n", "signature": "(self, alias=None, copy=True) -> sqlglot.expressions.Subquery:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.limit": {"fullname": "sqlglot.expressions.Subqueryable.limit", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.limit", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Subqueryable.with_": {"fullname": "sqlglot.expressions.Subqueryable.with_", "modulename": "sqlglot.expressions", "qualname": "Subqueryable.with_", "kind": "function", "doc": "

Append to or set the common table expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()\n'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • alias (str | Expression): the SQL code string to parse as the table name.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • as_ (str | Expression): the SQL code string to parse as the table expression.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • recursive (bool): set the RECURSIVE part of the expression. Defaults to False.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\talias,\tas_,\trecursive=None,\tappend=True,\tdialect=None,\tcopy=True,\t**opts):", "funcdef": "def"}, "sqlglot.expressions.Table": {"fullname": "sqlglot.expressions.Table", "modulename": "sqlglot.expressions", "qualname": "Table", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SystemTime": {"fullname": "sqlglot.expressions.SystemTime", "modulename": "sqlglot.expressions", "qualname": "SystemTime", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Union": {"fullname": "sqlglot.expressions.Union", "modulename": "sqlglot.expressions", "qualname": "Union", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Union.limit": {"fullname": "sqlglot.expressions.Union.limit", "modulename": "sqlglot.expressions", "qualname": "Union.limit", "kind": "function", "doc": "

Set the LIMIT expression.

\n\n
Example:
\n\n
\n
\n
>>> select("1").union(select("1")).limit(1).sql()\n'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | int | Expression): the SQL code string to parse.\nThis can also be an integer.\nIf a Limit instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Limit.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: The limited subqueryable.

\n
\n", "signature": "(\tself,\texpression,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Except": {"fullname": "sqlglot.expressions.Except", "modulename": "sqlglot.expressions", "qualname": "Except", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Intersect": {"fullname": "sqlglot.expressions.Intersect", "modulename": "sqlglot.expressions", "qualname": "Intersect", "kind": "class", "doc": "

\n", "bases": "Union"}, "sqlglot.expressions.Unnest": {"fullname": "sqlglot.expressions.Unnest", "modulename": "sqlglot.expressions", "qualname": "Unnest", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Update": {"fullname": "sqlglot.expressions.Update", "modulename": "sqlglot.expressions", "qualname": "Update", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Values": {"fullname": "sqlglot.expressions.Values", "modulename": "sqlglot.expressions", "qualname": "Values", "kind": "class", "doc": "

\n", "bases": "UDTF"}, "sqlglot.expressions.Var": {"fullname": "sqlglot.expressions.Var", "modulename": "sqlglot.expressions", "qualname": "Var", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Schema": {"fullname": "sqlglot.expressions.Schema", "modulename": "sqlglot.expressions", "qualname": "Schema", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Lock": {"fullname": "sqlglot.expressions.Lock", "modulename": "sqlglot.expressions", "qualname": "Lock", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Select": {"fullname": "sqlglot.expressions.Select", "modulename": "sqlglot.expressions", "qualname": "Select", "kind": "class", "doc": "

\n", "bases": "Subqueryable"}, "sqlglot.expressions.Select.from_": {"fullname": "sqlglot.expressions.Select.from_", "modulename": "sqlglot.expressions", "qualname": "Select.from_", "kind": "function", "doc": "

Set the FROM expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").sql()\n'SELECT x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf a From instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a From.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this flattens all the From expression into a single expression.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.group_by": {"fullname": "sqlglot.expressions.Select.group_by", "modulename": "sqlglot.expressions", "qualname": "Select.group_by", "kind": "function", "doc": "

Set the GROUP BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()\n'SELECT x, COUNT(1) FROM tbl GROUP BY x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf a Group instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Group.\nIf nothing is passed in then a group by is not applied to the expression
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this flattens all the Group expression into a single expression.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.order_by": {"fullname": "sqlglot.expressions.Select.order_by", "modulename": "sqlglot.expressions", "qualname": "Select.order_by", "kind": "function", "doc": "

Set the ORDER BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").order_by("x DESC").sql()\n'SELECT x FROM tbl ORDER BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf a Group instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Order.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this flattens all the Order expression into a single expression.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.sort_by": {"fullname": "sqlglot.expressions.Select.sort_by", "modulename": "sqlglot.expressions", "qualname": "Select.sort_by", "kind": "function", "doc": "

Set the SORT BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").sort_by("x DESC").sql()\n'SELECT x FROM tbl SORT BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf a Group instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a SORT.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this flattens all the Order expression into a single expression.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.cluster_by": {"fullname": "sqlglot.expressions.Select.cluster_by", "modulename": "sqlglot.expressions", "qualname": "Select.cluster_by", "kind": "function", "doc": "

Set the CLUSTER BY expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").cluster_by("x DESC").sql()\n'SELECT x FROM tbl CLUSTER BY x DESC'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf a Group instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Cluster.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this flattens all the Order expression into a single expression.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.limit": {"fullname": "sqlglot.expressions.Select.limit", "modulename": "sqlglot.expressions", "qualname": "Select.limit", "kind": "function", "doc": "

Set the LIMIT expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").limit(10).sql()\n'SELECT x FROM tbl LIMIT 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | int | Expression): the SQL code string to parse.\nThis can also be an integer.\nIf a Limit instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Limit.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.offset": {"fullname": "sqlglot.expressions.Select.offset", "modulename": "sqlglot.expressions", "qualname": "Select.offset", "kind": "function", "doc": "

Set the OFFSET expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").offset(10).sql()\n'SELECT x FROM tbl OFFSET 10'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | int | Expression): the SQL code string to parse.\nThis can also be an integer.\nIf a Offset instance is passed, this is used as-is.\nIf another Expression instance is passed, it will be wrapped in a Offset.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.select": {"fullname": "sqlglot.expressions.Select.select", "modulename": "sqlglot.expressions", "qualname": "Select.select", "kind": "function", "doc": "

Append to or set the SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "y").sql()\n'SELECT x, y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.lateral": {"fullname": "sqlglot.expressions.Select.lateral", "modulename": "sqlglot.expressions", "qualname": "Select.lateral", "kind": "function", "doc": "

Append to or set the LATERAL expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()\n'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.join": {"fullname": "sqlglot.expressions.Select.join", "modulename": "sqlglot.expressions", "qualname": "Select.join", "kind": "function", "doc": "

Append to or set the JOIN expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()\n'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n \n
\n
>>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()\n'SELECT 1 FROM a JOIN b USING (x, y, z)'\n
\n
\n \n

Use join_type to change the type of join:

\n \n
\n
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()\n'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code string to parse.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • on (str | Expression): optionally specify the join \"on\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • using (str | Expression): optionally specify the join \"using\" criteria as a SQL string.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • append (bool): if True, add to any existing expressions.\nOtherwise, this resets the expressions.
  • \n
  • join_type (str): If set, alter the parsed join type
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\texpression,\ton=None,\tusing=None,\tappend=True,\tjoin_type=None,\tjoin_alias=None,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.where": {"fullname": "sqlglot.expressions.Select.where", "modulename": "sqlglot.expressions", "qualname": "Select.where", "kind": "function", "doc": "

Append to or set the WHERE expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()\n"SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append (bool): if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.having": {"fullname": "sqlglot.expressions.Select.having", "modulename": "sqlglot.expressions", "qualname": "Select.having", "kind": "function", "doc": "

Append to or set the HAVING expressions.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()\n'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, it will be used as-is.\nMultiple expressions are combined with an AND operator.
  • \n
  • append (bool): if True, AND the new expressions to any existing expression.\nOtherwise, this resets the expression.
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.window": {"fullname": "sqlglot.expressions.Select.window", "modulename": "sqlglot.expressions", "qualname": "Select.window", "kind": "function", "doc": "

\n", "signature": "(\tself,\t*expressions,\tappend=True,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.distinct": {"fullname": "sqlglot.expressions.Select.distinct", "modulename": "sqlglot.expressions", "qualname": "Select.distinct", "kind": "function", "doc": "

Set the OFFSET expression.

\n\n
Example:
\n\n
\n
\n
>>> Select().from_("tbl").select("x").distinct().sql()\n'SELECT DISTINCT x FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • distinct (bool): whether the Select should be distinct
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

Select: the modified expression.

\n
\n", "signature": "(self, distinct=True, copy=True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Select.ctas": {"fullname": "sqlglot.expressions.Select.ctas", "modulename": "sqlglot.expressions", "qualname": "Select.ctas", "kind": "function", "doc": "

Convert this expression to a CREATE TABLE AS statement.

\n\n
Example:
\n\n
\n
\n
>>> Select().select("*").from_("tbl").ctas("x").sql()\n'CREATE TABLE x AS SELECT * FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • table (str | Expression): the SQL code string to parse as the table name.\nIf another Expression instance is passed, it will be used as-is.
  • \n
  • properties (dict): an optional mapping of table properties
  • \n
  • dialect (str): the dialect used to parse the input table.
  • \n
  • copy (bool): if False, modify this expression instance in-place.
  • \n
  • opts (kwargs): other options to use to parse the input table.
  • \n
\n\n
Returns:
\n\n
\n

Create: the CREATE TABLE AS expression

\n
\n", "signature": "(\tself,\ttable,\tproperties=None,\tdialect=None,\tcopy=True,\t**opts) -> sqlglot.expressions.Create:", "funcdef": "def"}, "sqlglot.expressions.Select.lock": {"fullname": "sqlglot.expressions.Select.lock", "modulename": "sqlglot.expressions", "qualname": "Select.lock", "kind": "function", "doc": "

Set the locking read mode for this expression.

\n\n
Examples:
\n\n
\n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"\n
\n
\n \n
\n
>>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")\n"SELECT x FROM tbl WHERE x = 'a' FOR SHARE"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • update: if True, the locking type will be FOR UPDATE, else it will be FOR SHARE.
  • \n
  • copy: if False, modify this expression instance in-place.
  • \n
\n\n
Returns:
\n\n
\n

The modified expression.

\n
\n", "signature": "(\tself,\tupdate: bool = True,\tcopy: bool = True) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.Subquery": {"fullname": "sqlglot.expressions.Subquery", "modulename": "sqlglot.expressions", "qualname": "Subquery", "kind": "class", "doc": "

\n", "bases": "DerivedTable, Unionable"}, "sqlglot.expressions.Subquery.unnest": {"fullname": "sqlglot.expressions.Subquery.unnest", "modulename": "sqlglot.expressions", "qualname": "Subquery.unnest", "kind": "function", "doc": "

Returns the first non subquery.

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.expressions.Subquery.output_name": {"fullname": "sqlglot.expressions.Subquery.output_name", "modulename": "sqlglot.expressions", "qualname": "Subquery.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.TableSample": {"fullname": "sqlglot.expressions.TableSample", "modulename": "sqlglot.expressions", "qualname": "TableSample", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Tag": {"fullname": "sqlglot.expressions.Tag", "modulename": "sqlglot.expressions", "qualname": "Tag", "kind": "class", "doc": "

Tags are used for generating arbitrary sql like SELECT x.

\n", "bases": "Expression"}, "sqlglot.expressions.Pivot": {"fullname": "sqlglot.expressions.Pivot", "modulename": "sqlglot.expressions", "qualname": "Pivot", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Window": {"fullname": "sqlglot.expressions.Window", "modulename": "sqlglot.expressions", "qualname": "Window", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.WindowSpec": {"fullname": "sqlglot.expressions.WindowSpec", "modulename": "sqlglot.expressions", "qualname": "WindowSpec", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Where": {"fullname": "sqlglot.expressions.Where", "modulename": "sqlglot.expressions", "qualname": "Where", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star": {"fullname": "sqlglot.expressions.Star", "modulename": "sqlglot.expressions", "qualname": "Star", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Star.output_name": {"fullname": "sqlglot.expressions.Star.output_name", "modulename": "sqlglot.expressions", "qualname": "Star.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Parameter": {"fullname": "sqlglot.expressions.Parameter", "modulename": "sqlglot.expressions", "qualname": "Parameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SessionParameter": {"fullname": "sqlglot.expressions.SessionParameter", "modulename": "sqlglot.expressions", "qualname": "SessionParameter", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Placeholder": {"fullname": "sqlglot.expressions.Placeholder", "modulename": "sqlglot.expressions", "qualname": "Placeholder", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Null": {"fullname": "sqlglot.expressions.Null", "modulename": "sqlglot.expressions", "qualname": "Null", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Boolean": {"fullname": "sqlglot.expressions.Boolean", "modulename": "sqlglot.expressions", "qualname": "Boolean", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.DataType": {"fullname": "sqlglot.expressions.DataType", "modulename": "sqlglot.expressions", "qualname": "DataType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DataType.Type": {"fullname": "sqlglot.expressions.DataType.Type", "modulename": "sqlglot.expressions", "qualname": "DataType.Type", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.expressions.DataType.Type.CHAR": {"fullname": "sqlglot.expressions.DataType.Type.CHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.CHAR", "kind": "variable", "doc": "

\n", "default_value": " = <Type.CHAR: 'CHAR'>"}, "sqlglot.expressions.DataType.Type.NCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <Type.NCHAR: 'NCHAR'>"}, "sqlglot.expressions.DataType.Type.VARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.VARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <Type.VARCHAR: 'VARCHAR'>"}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"fullname": "sqlglot.expressions.DataType.Type.NVARCHAR", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <Type.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.expressions.DataType.Type.TEXT": {"fullname": "sqlglot.expressions.DataType.Type.TEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TEXT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TEXT: 'TEXT'>"}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"fullname": "sqlglot.expressions.DataType.Type.LONGTEXT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"fullname": "sqlglot.expressions.DataType.Type.MEDIUMBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": " = <Type.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"fullname": "sqlglot.expressions.DataType.Type.LONGBLOB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": " = <Type.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.expressions.DataType.Type.BINARY": {"fullname": "sqlglot.expressions.DataType.Type.BINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BINARY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.BINARY: 'BINARY'>"}, "sqlglot.expressions.DataType.Type.VARBINARY": {"fullname": "sqlglot.expressions.DataType.Type.VARBINARY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.VARBINARY: 'VARBINARY'>"}, "sqlglot.expressions.DataType.Type.INT": {"fullname": "sqlglot.expressions.DataType.Type.INT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.INT: 'INT'>"}, "sqlglot.expressions.DataType.Type.TINYINT": {"fullname": "sqlglot.expressions.DataType.Type.TINYINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TINYINT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TINYINT: 'TINYINT'>"}, "sqlglot.expressions.DataType.Type.SMALLINT": {"fullname": "sqlglot.expressions.DataType.Type.SMALLINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.SMALLINT: 'SMALLINT'>"}, "sqlglot.expressions.DataType.Type.BIGINT": {"fullname": "sqlglot.expressions.DataType.Type.BIGINT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGINT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.BIGINT: 'BIGINT'>"}, "sqlglot.expressions.DataType.Type.FLOAT": {"fullname": "sqlglot.expressions.DataType.Type.FLOAT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.FLOAT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.FLOAT: 'FLOAT'>"}, "sqlglot.expressions.DataType.Type.DOUBLE": {"fullname": "sqlglot.expressions.DataType.Type.DOUBLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": " = <Type.DOUBLE: 'DOUBLE'>"}, "sqlglot.expressions.DataType.Type.DECIMAL": {"fullname": "sqlglot.expressions.DataType.Type.DECIMAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.DECIMAL: 'DECIMAL'>"}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"fullname": "sqlglot.expressions.DataType.Type.BOOLEAN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": " = <Type.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.expressions.DataType.Type.JSON": {"fullname": "sqlglot.expressions.DataType.Type.JSON", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSON", "kind": "variable", "doc": "

\n", "default_value": " = <Type.JSON: 'JSON'>"}, "sqlglot.expressions.DataType.Type.JSONB": {"fullname": "sqlglot.expressions.DataType.Type.JSONB", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.JSONB", "kind": "variable", "doc": "

\n", "default_value": " = <Type.JSONB: 'JSONB'>"}, "sqlglot.expressions.DataType.Type.INTERVAL": {"fullname": "sqlglot.expressions.DataType.Type.INTERVAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.INTERVAL: 'INTERVAL'>"}, "sqlglot.expressions.DataType.Type.TIME": {"fullname": "sqlglot.expressions.DataType.Type.TIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIME", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TIME: 'TIME'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"fullname": "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": " = <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.expressions.DataType.Type.DATE": {"fullname": "sqlglot.expressions.DataType.Type.DATE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATE", "kind": "variable", "doc": "

\n", "default_value": " = <Type.DATE: 'DATE'>"}, "sqlglot.expressions.DataType.Type.DATETIME": {"fullname": "sqlglot.expressions.DataType.Type.DATETIME", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.DATETIME", "kind": "variable", "doc": "

\n", "default_value": " = <Type.DATETIME: 'DATETIME'>"}, "sqlglot.expressions.DataType.Type.ARRAY": {"fullname": "sqlglot.expressions.DataType.Type.ARRAY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ARRAY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.ARRAY: 'ARRAY'>"}, "sqlglot.expressions.DataType.Type.MAP": {"fullname": "sqlglot.expressions.DataType.Type.MAP", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MAP", "kind": "variable", "doc": "

\n", "default_value": " = <Type.MAP: 'MAP'>"}, "sqlglot.expressions.DataType.Type.UUID": {"fullname": "sqlglot.expressions.DataType.Type.UUID", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UUID", "kind": "variable", "doc": "

\n", "default_value": " = <Type.UUID: 'UUID'>"}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"fullname": "sqlglot.expressions.DataType.Type.GEOGRAPHY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"fullname": "sqlglot.expressions.DataType.Type.GEOMETRY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.expressions.DataType.Type.STRUCT": {"fullname": "sqlglot.expressions.DataType.Type.STRUCT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.STRUCT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.STRUCT: 'STRUCT'>"}, "sqlglot.expressions.DataType.Type.NULLABLE": {"fullname": "sqlglot.expressions.DataType.Type.NULLABLE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": " = <Type.NULLABLE: 'NULLABLE'>"}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"fullname": "sqlglot.expressions.DataType.Type.HLLSKETCH", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": " = <Type.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.expressions.DataType.Type.HSTORE": {"fullname": "sqlglot.expressions.DataType.Type.HSTORE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.HSTORE", "kind": "variable", "doc": "

\n", "default_value": " = <Type.HSTORE: 'HSTORE'>"}, "sqlglot.expressions.DataType.Type.SUPER": {"fullname": "sqlglot.expressions.DataType.Type.SUPER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SUPER", "kind": "variable", "doc": "

\n", "default_value": " = <Type.SUPER: 'SUPER'>"}, "sqlglot.expressions.DataType.Type.SERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.SERIAL: 'SERIAL'>"}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.SMALLSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"fullname": "sqlglot.expressions.DataType.Type.BIGSERIAL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.expressions.DataType.Type.XML": {"fullname": "sqlglot.expressions.DataType.Type.XML", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.XML", "kind": "variable", "doc": "

\n", "default_value": " = <Type.XML: 'XML'>"}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"fullname": "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": " = <Type.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.expressions.DataType.Type.MONEY": {"fullname": "sqlglot.expressions.DataType.Type.MONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.MONEY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.MONEY: 'MONEY'>"}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"fullname": "sqlglot.expressions.DataType.Type.SMALLMONEY", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": " = <Type.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"fullname": "sqlglot.expressions.DataType.Type.ROWVERSION", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": " = <Type.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.expressions.DataType.Type.IMAGE": {"fullname": "sqlglot.expressions.DataType.Type.IMAGE", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.IMAGE", "kind": "variable", "doc": "

\n", "default_value": " = <Type.IMAGE: 'IMAGE'>"}, "sqlglot.expressions.DataType.Type.VARIANT": {"fullname": "sqlglot.expressions.DataType.Type.VARIANT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.VARIANT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.VARIANT: 'VARIANT'>"}, "sqlglot.expressions.DataType.Type.OBJECT": {"fullname": "sqlglot.expressions.DataType.Type.OBJECT", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.OBJECT", "kind": "variable", "doc": "

\n", "default_value": " = <Type.OBJECT: 'OBJECT'>"}, "sqlglot.expressions.DataType.Type.NULL": {"fullname": "sqlglot.expressions.DataType.Type.NULL", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.NULL", "kind": "variable", "doc": "

\n", "default_value": " = <Type.NULL: 'NULL'>"}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"fullname": "sqlglot.expressions.DataType.Type.UNKNOWN", "modulename": "sqlglot.expressions", "qualname": "DataType.Type.UNKNOWN", "kind": "variable", "doc": "

\n", "default_value": " = <Type.UNKNOWN: 'UNKNOWN'>"}, "sqlglot.expressions.DataType.build": {"fullname": "sqlglot.expressions.DataType.build", "modulename": "sqlglot.expressions", "qualname": "DataType.build", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tdtype: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.expressions.DataType.is_type": {"fullname": "sqlglot.expressions.DataType.is_type", "modulename": "sqlglot.expressions", "qualname": "DataType.is_type", "kind": "function", "doc": "

\n", "signature": "(self, dtype: sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.PseudoType": {"fullname": "sqlglot.expressions.PseudoType", "modulename": "sqlglot.expressions", "qualname": "PseudoType", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.StructKwarg": {"fullname": "sqlglot.expressions.StructKwarg", "modulename": "sqlglot.expressions", "qualname": "StructKwarg", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.SubqueryPredicate": {"fullname": "sqlglot.expressions.SubqueryPredicate", "modulename": "sqlglot.expressions", "qualname": "SubqueryPredicate", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.All": {"fullname": "sqlglot.expressions.All", "modulename": "sqlglot.expressions", "qualname": "All", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Any": {"fullname": "sqlglot.expressions.Any", "modulename": "sqlglot.expressions", "qualname": "Any", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Exists": {"fullname": "sqlglot.expressions.Exists", "modulename": "sqlglot.expressions", "qualname": "Exists", "kind": "class", "doc": "

\n", "bases": "SubqueryPredicate"}, "sqlglot.expressions.Command": {"fullname": "sqlglot.expressions.Command", "modulename": "sqlglot.expressions", "qualname": "Command", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Transaction": {"fullname": "sqlglot.expressions.Transaction", "modulename": "sqlglot.expressions", "qualname": "Transaction", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Commit": {"fullname": "sqlglot.expressions.Commit", "modulename": "sqlglot.expressions", "qualname": "Commit", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Rollback": {"fullname": "sqlglot.expressions.Rollback", "modulename": "sqlglot.expressions", "qualname": "Rollback", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AlterTable": {"fullname": "sqlglot.expressions.AlterTable", "modulename": "sqlglot.expressions", "qualname": "AlterTable", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AddConstraint": {"fullname": "sqlglot.expressions.AddConstraint", "modulename": "sqlglot.expressions", "qualname": "AddConstraint", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.DropPartition": {"fullname": "sqlglot.expressions.DropPartition", "modulename": "sqlglot.expressions", "qualname": "DropPartition", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Binary": {"fullname": "sqlglot.expressions.Binary", "modulename": "sqlglot.expressions", "qualname": "Binary", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Add": {"fullname": "sqlglot.expressions.Add", "modulename": "sqlglot.expressions", "qualname": "Add", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Connector": {"fullname": "sqlglot.expressions.Connector", "modulename": "sqlglot.expressions", "qualname": "Connector", "kind": "class", "doc": "

\n", "bases": "Binary, Condition"}, "sqlglot.expressions.And": {"fullname": "sqlglot.expressions.And", "modulename": "sqlglot.expressions", "qualname": "And", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.Or": {"fullname": "sqlglot.expressions.Or", "modulename": "sqlglot.expressions", "qualname": "Or", "kind": "class", "doc": "

\n", "bases": "Connector"}, "sqlglot.expressions.BitwiseAnd": {"fullname": "sqlglot.expressions.BitwiseAnd", "modulename": "sqlglot.expressions", "qualname": "BitwiseAnd", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseLeftShift": {"fullname": "sqlglot.expressions.BitwiseLeftShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseLeftShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseOr": {"fullname": "sqlglot.expressions.BitwiseOr", "modulename": "sqlglot.expressions", "qualname": "BitwiseOr", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseRightShift": {"fullname": "sqlglot.expressions.BitwiseRightShift", "modulename": "sqlglot.expressions", "qualname": "BitwiseRightShift", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.BitwiseXor": {"fullname": "sqlglot.expressions.BitwiseXor", "modulename": "sqlglot.expressions", "qualname": "BitwiseXor", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Div": {"fullname": "sqlglot.expressions.Div", "modulename": "sqlglot.expressions", "qualname": "Div", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Dot": {"fullname": "sqlglot.expressions.Dot", "modulename": "sqlglot.expressions", "qualname": "Dot", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.DPipe": {"fullname": "sqlglot.expressions.DPipe", "modulename": "sqlglot.expressions", "qualname": "DPipe", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.EQ": {"fullname": "sqlglot.expressions.EQ", "modulename": "sqlglot.expressions", "qualname": "EQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeEQ": {"fullname": "sqlglot.expressions.NullSafeEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.NullSafeNEQ": {"fullname": "sqlglot.expressions.NullSafeNEQ", "modulename": "sqlglot.expressions", "qualname": "NullSafeNEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Distance": {"fullname": "sqlglot.expressions.Distance", "modulename": "sqlglot.expressions", "qualname": "Distance", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Escape": {"fullname": "sqlglot.expressions.Escape", "modulename": "sqlglot.expressions", "qualname": "Escape", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Glob": {"fullname": "sqlglot.expressions.Glob", "modulename": "sqlglot.expressions", "qualname": "Glob", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GT": {"fullname": "sqlglot.expressions.GT", "modulename": "sqlglot.expressions", "qualname": "GT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.GTE": {"fullname": "sqlglot.expressions.GTE", "modulename": "sqlglot.expressions", "qualname": "GTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.ILike": {"fullname": "sqlglot.expressions.ILike", "modulename": "sqlglot.expressions", "qualname": "ILike", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.IntDiv": {"fullname": "sqlglot.expressions.IntDiv", "modulename": "sqlglot.expressions", "qualname": "IntDiv", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Is": {"fullname": "sqlglot.expressions.Is", "modulename": "sqlglot.expressions", "qualname": "Is", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Kwarg": {"fullname": "sqlglot.expressions.Kwarg", "modulename": "sqlglot.expressions", "qualname": "Kwarg", "kind": "class", "doc": "

Kwarg in special functions like func(kwarg => y).

\n", "bases": "Binary"}, "sqlglot.expressions.Like": {"fullname": "sqlglot.expressions.Like", "modulename": "sqlglot.expressions", "qualname": "Like", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LT": {"fullname": "sqlglot.expressions.LT", "modulename": "sqlglot.expressions", "qualname": "LT", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.LTE": {"fullname": "sqlglot.expressions.LTE", "modulename": "sqlglot.expressions", "qualname": "LTE", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Mod": {"fullname": "sqlglot.expressions.Mod", "modulename": "sqlglot.expressions", "qualname": "Mod", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Mul": {"fullname": "sqlglot.expressions.Mul", "modulename": "sqlglot.expressions", "qualname": "Mul", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.NEQ": {"fullname": "sqlglot.expressions.NEQ", "modulename": "sqlglot.expressions", "qualname": "NEQ", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.SimilarTo": {"fullname": "sqlglot.expressions.SimilarTo", "modulename": "sqlglot.expressions", "qualname": "SimilarTo", "kind": "class", "doc": "

\n", "bases": "Binary, Predicate"}, "sqlglot.expressions.Slice": {"fullname": "sqlglot.expressions.Slice", "modulename": "sqlglot.expressions", "qualname": "Slice", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Sub": {"fullname": "sqlglot.expressions.Sub", "modulename": "sqlglot.expressions", "qualname": "Sub", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.Unary": {"fullname": "sqlglot.expressions.Unary", "modulename": "sqlglot.expressions", "qualname": "Unary", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.BitwiseNot": {"fullname": "sqlglot.expressions.BitwiseNot", "modulename": "sqlglot.expressions", "qualname": "BitwiseNot", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Not": {"fullname": "sqlglot.expressions.Not", "modulename": "sqlglot.expressions", "qualname": "Not", "kind": "class", "doc": "

\n", "bases": "Unary, Condition"}, "sqlglot.expressions.Paren": {"fullname": "sqlglot.expressions.Paren", "modulename": "sqlglot.expressions", "qualname": "Paren", "kind": "class", "doc": "

\n", "bases": "Unary, Condition"}, "sqlglot.expressions.Neg": {"fullname": "sqlglot.expressions.Neg", "modulename": "sqlglot.expressions", "qualname": "Neg", "kind": "class", "doc": "

\n", "bases": "Unary"}, "sqlglot.expressions.Alias": {"fullname": "sqlglot.expressions.Alias", "modulename": "sqlglot.expressions", "qualname": "Alias", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Alias.output_name": {"fullname": "sqlglot.expressions.Alias.output_name", "modulename": "sqlglot.expressions", "qualname": "Alias.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Aliases": {"fullname": "sqlglot.expressions.Aliases", "modulename": "sqlglot.expressions", "qualname": "Aliases", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.AtTimeZone": {"fullname": "sqlglot.expressions.AtTimeZone", "modulename": "sqlglot.expressions", "qualname": "AtTimeZone", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Between": {"fullname": "sqlglot.expressions.Between", "modulename": "sqlglot.expressions", "qualname": "Between", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.Bracket": {"fullname": "sqlglot.expressions.Bracket", "modulename": "sqlglot.expressions", "qualname": "Bracket", "kind": "class", "doc": "

\n", "bases": "Condition"}, "sqlglot.expressions.Distinct": {"fullname": "sqlglot.expressions.Distinct", "modulename": "sqlglot.expressions", "qualname": "Distinct", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.In": {"fullname": "sqlglot.expressions.In", "modulename": "sqlglot.expressions", "qualname": "In", "kind": "class", "doc": "

\n", "bases": "Predicate"}, "sqlglot.expressions.TimeUnit": {"fullname": "sqlglot.expressions.TimeUnit", "modulename": "sqlglot.expressions", "qualname": "TimeUnit", "kind": "class", "doc": "

Automatically converts unit arg into a var.

\n", "bases": "Expression"}, "sqlglot.expressions.TimeUnit.__init__": {"fullname": "sqlglot.expressions.TimeUnit.__init__", "modulename": "sqlglot.expressions", "qualname": "TimeUnit.__init__", "kind": "function", "doc": "

\n", "signature": "(**args)"}, "sqlglot.expressions.Interval": {"fullname": "sqlglot.expressions.Interval", "modulename": "sqlglot.expressions", "qualname": "Interval", "kind": "class", "doc": "

\n", "bases": "TimeUnit"}, "sqlglot.expressions.IgnoreNulls": {"fullname": "sqlglot.expressions.IgnoreNulls", "modulename": "sqlglot.expressions", "qualname": "IgnoreNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.RespectNulls": {"fullname": "sqlglot.expressions.RespectNulls", "modulename": "sqlglot.expressions", "qualname": "RespectNulls", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Func": {"fullname": "sqlglot.expressions.Func", "modulename": "sqlglot.expressions", "qualname": "Func", "kind": "class", "doc": "

The base class for all function expressions.

\n\n
Attributes:
\n\n
    \n
  • is_var_len_args (bool): if set to True the last argument defined in arg_types will be\ntreated as a variable length argument and the argument's value will be stored as a list.
  • \n
  • _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)\nfor this function expression. These values are used to map this node to a name during parsing\nas well as to provide the function's name during SQL string generation. By default the SQL\nname is set to the expression's class name transformed to snake case.
  • \n
\n", "bases": "Condition"}, "sqlglot.expressions.Func.from_arg_list": {"fullname": "sqlglot.expressions.Func.from_arg_list", "modulename": "sqlglot.expressions", "qualname": "Func.from_arg_list", "kind": "function", "doc": "

\n", "signature": "(cls, args):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_names": {"fullname": "sqlglot.expressions.Func.sql_names", "modulename": "sqlglot.expressions", "qualname": "Func.sql_names", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.sql_name": {"fullname": "sqlglot.expressions.Func.sql_name", "modulename": "sqlglot.expressions", "qualname": "Func.sql_name", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.Func.default_parser_mappings": {"fullname": "sqlglot.expressions.Func.default_parser_mappings", "modulename": "sqlglot.expressions", "qualname": "Func.default_parser_mappings", "kind": "function", "doc": "

\n", "signature": "(cls):", "funcdef": "def"}, "sqlglot.expressions.AggFunc": {"fullname": "sqlglot.expressions.AggFunc", "modulename": "sqlglot.expressions", "qualname": "AggFunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Abs": {"fullname": "sqlglot.expressions.Abs", "modulename": "sqlglot.expressions", "qualname": "Abs", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Anonymous": {"fullname": "sqlglot.expressions.Anonymous", "modulename": "sqlglot.expressions", "qualname": "Anonymous", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ApproxDistinct": {"fullname": "sqlglot.expressions.ApproxDistinct", "modulename": "sqlglot.expressions", "qualname": "ApproxDistinct", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Array": {"fullname": "sqlglot.expressions.Array", "modulename": "sqlglot.expressions", "qualname": "Array", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GenerateSeries": {"fullname": "sqlglot.expressions.GenerateSeries", "modulename": "sqlglot.expressions", "qualname": "GenerateSeries", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAgg": {"fullname": "sqlglot.expressions.ArrayAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ArrayAll": {"fullname": "sqlglot.expressions.ArrayAll", "modulename": "sqlglot.expressions", "qualname": "ArrayAll", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayAny": {"fullname": "sqlglot.expressions.ArrayAny", "modulename": "sqlglot.expressions", "qualname": "ArrayAny", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayConcat": {"fullname": "sqlglot.expressions.ArrayConcat", "modulename": "sqlglot.expressions", "qualname": "ArrayConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayContains": {"fullname": "sqlglot.expressions.ArrayContains", "modulename": "sqlglot.expressions", "qualname": "ArrayContains", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayFilter": {"fullname": "sqlglot.expressions.ArrayFilter", "modulename": "sqlglot.expressions", "qualname": "ArrayFilter", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySize": {"fullname": "sqlglot.expressions.ArraySize", "modulename": "sqlglot.expressions", "qualname": "ArraySize", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySort": {"fullname": "sqlglot.expressions.ArraySort", "modulename": "sqlglot.expressions", "qualname": "ArraySort", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArraySum": {"fullname": "sqlglot.expressions.ArraySum", "modulename": "sqlglot.expressions", "qualname": "ArraySum", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ArrayUnionAgg": {"fullname": "sqlglot.expressions.ArrayUnionAgg", "modulename": "sqlglot.expressions", "qualname": "ArrayUnionAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Avg": {"fullname": "sqlglot.expressions.Avg", "modulename": "sqlglot.expressions", "qualname": "Avg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.AnyValue": {"fullname": "sqlglot.expressions.AnyValue", "modulename": "sqlglot.expressions", "qualname": "AnyValue", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Case": {"fullname": "sqlglot.expressions.Case", "modulename": "sqlglot.expressions", "qualname": "Case", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Cast": {"fullname": "sqlglot.expressions.Cast", "modulename": "sqlglot.expressions", "qualname": "Cast", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Cast.output_name": {"fullname": "sqlglot.expressions.Cast.output_name", "modulename": "sqlglot.expressions", "qualname": "Cast.output_name", "kind": "variable", "doc": "

Name of the output column if this expression is a selection.

\n\n

If the Expression has no output name, an empty string is returned.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> parse_one("SELECT a").expressions[0].output_name\n'a'\n>>> parse_one("SELECT b AS c").expressions[0].output_name\n'c'\n>>> parse_one("SELECT 1 + 2").expressions[0].output_name\n''\n
\n
\n
\n"}, "sqlglot.expressions.Cast.is_type": {"fullname": "sqlglot.expressions.Cast.is_type", "modulename": "sqlglot.expressions", "qualname": "Cast.is_type", "kind": "function", "doc": "

\n", "signature": "(self, dtype: sqlglot.expressions.DataType.Type) -> bool:", "funcdef": "def"}, "sqlglot.expressions.Collate": {"fullname": "sqlglot.expressions.Collate", "modulename": "sqlglot.expressions", "qualname": "Collate", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.TryCast": {"fullname": "sqlglot.expressions.TryCast", "modulename": "sqlglot.expressions", "qualname": "TryCast", "kind": "class", "doc": "

\n", "bases": "Cast"}, "sqlglot.expressions.Ceil": {"fullname": "sqlglot.expressions.Ceil", "modulename": "sqlglot.expressions", "qualname": "Ceil", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Coalesce": {"fullname": "sqlglot.expressions.Coalesce", "modulename": "sqlglot.expressions", "qualname": "Coalesce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Concat": {"fullname": "sqlglot.expressions.Concat", "modulename": "sqlglot.expressions", "qualname": "Concat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.ConcatWs": {"fullname": "sqlglot.expressions.ConcatWs", "modulename": "sqlglot.expressions", "qualname": "ConcatWs", "kind": "class", "doc": "

\n", "bases": "Concat"}, "sqlglot.expressions.Count": {"fullname": "sqlglot.expressions.Count", "modulename": "sqlglot.expressions", "qualname": "Count", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.CurrentDate": {"fullname": "sqlglot.expressions.CurrentDate", "modulename": "sqlglot.expressions", "qualname": "CurrentDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentDatetime": {"fullname": "sqlglot.expressions.CurrentDatetime", "modulename": "sqlglot.expressions", "qualname": "CurrentDatetime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTime": {"fullname": "sqlglot.expressions.CurrentTime", "modulename": "sqlglot.expressions", "qualname": "CurrentTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.CurrentTimestamp": {"fullname": "sqlglot.expressions.CurrentTimestamp", "modulename": "sqlglot.expressions", "qualname": "CurrentTimestamp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateAdd": {"fullname": "sqlglot.expressions.DateAdd", "modulename": "sqlglot.expressions", "qualname": "DateAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateSub": {"fullname": "sqlglot.expressions.DateSub", "modulename": "sqlglot.expressions", "qualname": "DateSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateDiff": {"fullname": "sqlglot.expressions.DateDiff", "modulename": "sqlglot.expressions", "qualname": "DateDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateTrunc": {"fullname": "sqlglot.expressions.DateTrunc", "modulename": "sqlglot.expressions", "qualname": "DateTrunc", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DatetimeAdd": {"fullname": "sqlglot.expressions.DatetimeAdd", "modulename": "sqlglot.expressions", "qualname": "DatetimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeSub": {"fullname": "sqlglot.expressions.DatetimeSub", "modulename": "sqlglot.expressions", "qualname": "DatetimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeDiff": {"fullname": "sqlglot.expressions.DatetimeDiff", "modulename": "sqlglot.expressions", "qualname": "DatetimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DatetimeTrunc": {"fullname": "sqlglot.expressions.DatetimeTrunc", "modulename": "sqlglot.expressions", "qualname": "DatetimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DayOfWeek": {"fullname": "sqlglot.expressions.DayOfWeek", "modulename": "sqlglot.expressions", "qualname": "DayOfWeek", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfMonth": {"fullname": "sqlglot.expressions.DayOfMonth", "modulename": "sqlglot.expressions", "qualname": "DayOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DayOfYear": {"fullname": "sqlglot.expressions.DayOfYear", "modulename": "sqlglot.expressions", "qualname": "DayOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.WeekOfYear": {"fullname": "sqlglot.expressions.WeekOfYear", "modulename": "sqlglot.expressions", "qualname": "WeekOfYear", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LastDateOfMonth": {"fullname": "sqlglot.expressions.LastDateOfMonth", "modulename": "sqlglot.expressions", "qualname": "LastDateOfMonth", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Extract": {"fullname": "sqlglot.expressions.Extract", "modulename": "sqlglot.expressions", "qualname": "Extract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimestampAdd": {"fullname": "sqlglot.expressions.TimestampAdd", "modulename": "sqlglot.expressions", "qualname": "TimestampAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampSub": {"fullname": "sqlglot.expressions.TimestampSub", "modulename": "sqlglot.expressions", "qualname": "TimestampSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampDiff": {"fullname": "sqlglot.expressions.TimestampDiff", "modulename": "sqlglot.expressions", "qualname": "TimestampDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimestampTrunc": {"fullname": "sqlglot.expressions.TimestampTrunc", "modulename": "sqlglot.expressions", "qualname": "TimestampTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeAdd": {"fullname": "sqlglot.expressions.TimeAdd", "modulename": "sqlglot.expressions", "qualname": "TimeAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeSub": {"fullname": "sqlglot.expressions.TimeSub", "modulename": "sqlglot.expressions", "qualname": "TimeSub", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeDiff": {"fullname": "sqlglot.expressions.TimeDiff", "modulename": "sqlglot.expressions", "qualname": "TimeDiff", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TimeTrunc": {"fullname": "sqlglot.expressions.TimeTrunc", "modulename": "sqlglot.expressions", "qualname": "TimeTrunc", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.DateFromParts": {"fullname": "sqlglot.expressions.DateFromParts", "modulename": "sqlglot.expressions", "qualname": "DateFromParts", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateStrToDate": {"fullname": "sqlglot.expressions.DateStrToDate", "modulename": "sqlglot.expressions", "qualname": "DateStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDateStr": {"fullname": "sqlglot.expressions.DateToDateStr", "modulename": "sqlglot.expressions", "qualname": "DateToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DateToDi": {"fullname": "sqlglot.expressions.DateToDi", "modulename": "sqlglot.expressions", "qualname": "DateToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Day": {"fullname": "sqlglot.expressions.Day", "modulename": "sqlglot.expressions", "qualname": "Day", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Decode": {"fullname": "sqlglot.expressions.Decode", "modulename": "sqlglot.expressions", "qualname": "Decode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.DiToDate": {"fullname": "sqlglot.expressions.DiToDate", "modulename": "sqlglot.expressions", "qualname": "DiToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Encode": {"fullname": "sqlglot.expressions.Encode", "modulename": "sqlglot.expressions", "qualname": "Encode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Exp": {"fullname": "sqlglot.expressions.Exp", "modulename": "sqlglot.expressions", "qualname": "Exp", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Explode": {"fullname": "sqlglot.expressions.Explode", "modulename": "sqlglot.expressions", "qualname": "Explode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Floor": {"fullname": "sqlglot.expressions.Floor", "modulename": "sqlglot.expressions", "qualname": "Floor", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Greatest": {"fullname": "sqlglot.expressions.Greatest", "modulename": "sqlglot.expressions", "qualname": "Greatest", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.GroupConcat": {"fullname": "sqlglot.expressions.GroupConcat", "modulename": "sqlglot.expressions", "qualname": "GroupConcat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Hex": {"fullname": "sqlglot.expressions.Hex", "modulename": "sqlglot.expressions", "qualname": "Hex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.If": {"fullname": "sqlglot.expressions.If", "modulename": "sqlglot.expressions", "qualname": "If", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.IfNull": {"fullname": "sqlglot.expressions.IfNull", "modulename": "sqlglot.expressions", "qualname": "IfNull", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Initcap": {"fullname": "sqlglot.expressions.Initcap", "modulename": "sqlglot.expressions", "qualname": "Initcap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.JSONBContains": {"fullname": "sqlglot.expressions.JSONBContains", "modulename": "sqlglot.expressions", "qualname": "JSONBContains", "kind": "class", "doc": "

\n", "bases": "Binary"}, "sqlglot.expressions.JSONExtract": {"fullname": "sqlglot.expressions.JSONExtract", "modulename": "sqlglot.expressions", "qualname": "JSONExtract", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.JSONExtractScalar": {"fullname": "sqlglot.expressions.JSONExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtract": {"fullname": "sqlglot.expressions.JSONBExtract", "modulename": "sqlglot.expressions", "qualname": "JSONBExtract", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.JSONBExtractScalar": {"fullname": "sqlglot.expressions.JSONBExtractScalar", "modulename": "sqlglot.expressions", "qualname": "JSONBExtractScalar", "kind": "class", "doc": "

\n", "bases": "JSONExtract"}, "sqlglot.expressions.Least": {"fullname": "sqlglot.expressions.Least", "modulename": "sqlglot.expressions", "qualname": "Least", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Length": {"fullname": "sqlglot.expressions.Length", "modulename": "sqlglot.expressions", "qualname": "Length", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Levenshtein": {"fullname": "sqlglot.expressions.Levenshtein", "modulename": "sqlglot.expressions", "qualname": "Levenshtein", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Ln": {"fullname": "sqlglot.expressions.Ln", "modulename": "sqlglot.expressions", "qualname": "Ln", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log": {"fullname": "sqlglot.expressions.Log", "modulename": "sqlglot.expressions", "qualname": "Log", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log2": {"fullname": "sqlglot.expressions.Log2", "modulename": "sqlglot.expressions", "qualname": "Log2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Log10": {"fullname": "sqlglot.expressions.Log10", "modulename": "sqlglot.expressions", "qualname": "Log10", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.LogicalOr": {"fullname": "sqlglot.expressions.LogicalOr", "modulename": "sqlglot.expressions", "qualname": "LogicalOr", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Lower": {"fullname": "sqlglot.expressions.Lower", "modulename": "sqlglot.expressions", "qualname": "Lower", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Map": {"fullname": "sqlglot.expressions.Map", "modulename": "sqlglot.expressions", "qualname": "Map", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.VarMap": {"fullname": "sqlglot.expressions.VarMap", "modulename": "sqlglot.expressions", "qualname": "VarMap", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Matches": {"fullname": "sqlglot.expressions.Matches", "modulename": "sqlglot.expressions", "qualname": "Matches", "kind": "class", "doc": "

Oracle/Snowflake decode.\nhttps://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm\nPattern matching MATCHES(value, search1, result1, ...searchN, resultN, else)

\n", "bases": "Func"}, "sqlglot.expressions.Max": {"fullname": "sqlglot.expressions.Max", "modulename": "sqlglot.expressions", "qualname": "Max", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Min": {"fullname": "sqlglot.expressions.Min", "modulename": "sqlglot.expressions", "qualname": "Min", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Month": {"fullname": "sqlglot.expressions.Month", "modulename": "sqlglot.expressions", "qualname": "Month", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Nvl2": {"fullname": "sqlglot.expressions.Nvl2", "modulename": "sqlglot.expressions", "qualname": "Nvl2", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Posexplode": {"fullname": "sqlglot.expressions.Posexplode", "modulename": "sqlglot.expressions", "qualname": "Posexplode", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Pow": {"fullname": "sqlglot.expressions.Pow", "modulename": "sqlglot.expressions", "qualname": "Pow", "kind": "class", "doc": "

\n", "bases": "Binary, Func"}, "sqlglot.expressions.PercentileCont": {"fullname": "sqlglot.expressions.PercentileCont", "modulename": "sqlglot.expressions", "qualname": "PercentileCont", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.PercentileDisc": {"fullname": "sqlglot.expressions.PercentileDisc", "modulename": "sqlglot.expressions", "qualname": "PercentileDisc", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Quantile": {"fullname": "sqlglot.expressions.Quantile", "modulename": "sqlglot.expressions", "qualname": "Quantile", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Quantiles": {"fullname": "sqlglot.expressions.Quantiles", "modulename": "sqlglot.expressions", "qualname": "Quantiles", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.QuantileIf": {"fullname": "sqlglot.expressions.QuantileIf", "modulename": "sqlglot.expressions", "qualname": "QuantileIf", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.ApproxQuantile": {"fullname": "sqlglot.expressions.ApproxQuantile", "modulename": "sqlglot.expressions", "qualname": "ApproxQuantile", "kind": "class", "doc": "

\n", "bases": "Quantile"}, "sqlglot.expressions.ReadCSV": {"fullname": "sqlglot.expressions.ReadCSV", "modulename": "sqlglot.expressions", "qualname": "ReadCSV", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Reduce": {"fullname": "sqlglot.expressions.Reduce", "modulename": "sqlglot.expressions", "qualname": "Reduce", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpLike": {"fullname": "sqlglot.expressions.RegexpLike", "modulename": "sqlglot.expressions", "qualname": "RegexpLike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpILike": {"fullname": "sqlglot.expressions.RegexpILike", "modulename": "sqlglot.expressions", "qualname": "RegexpILike", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RegexpSplit": {"fullname": "sqlglot.expressions.RegexpSplit", "modulename": "sqlglot.expressions", "qualname": "RegexpSplit", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Repeat": {"fullname": "sqlglot.expressions.Repeat", "modulename": "sqlglot.expressions", "qualname": "Repeat", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Round": {"fullname": "sqlglot.expressions.Round", "modulename": "sqlglot.expressions", "qualname": "Round", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.RowNumber": {"fullname": "sqlglot.expressions.RowNumber", "modulename": "sqlglot.expressions", "qualname": "RowNumber", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SafeDivide": {"fullname": "sqlglot.expressions.SafeDivide", "modulename": "sqlglot.expressions", "qualname": "SafeDivide", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.SetAgg": {"fullname": "sqlglot.expressions.SetAgg", "modulename": "sqlglot.expressions", "qualname": "SetAgg", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.SortArray": {"fullname": "sqlglot.expressions.SortArray", "modulename": "sqlglot.expressions", "qualname": "SortArray", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Split": {"fullname": "sqlglot.expressions.Split", "modulename": "sqlglot.expressions", "qualname": "Split", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Substring": {"fullname": "sqlglot.expressions.Substring", "modulename": "sqlglot.expressions", "qualname": "Substring", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrPosition": {"fullname": "sqlglot.expressions.StrPosition", "modulename": "sqlglot.expressions", "qualname": "StrPosition", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToDate": {"fullname": "sqlglot.expressions.StrToDate", "modulename": "sqlglot.expressions", "qualname": "StrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToTime": {"fullname": "sqlglot.expressions.StrToTime", "modulename": "sqlglot.expressions", "qualname": "StrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StrToUnix": {"fullname": "sqlglot.expressions.StrToUnix", "modulename": "sqlglot.expressions", "qualname": "StrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.NumberToStr": {"fullname": "sqlglot.expressions.NumberToStr", "modulename": "sqlglot.expressions", "qualname": "NumberToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Struct": {"fullname": "sqlglot.expressions.Struct", "modulename": "sqlglot.expressions", "qualname": "Struct", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.StructExtract": {"fullname": "sqlglot.expressions.StructExtract", "modulename": "sqlglot.expressions", "qualname": "StructExtract", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Sum": {"fullname": "sqlglot.expressions.Sum", "modulename": "sqlglot.expressions", "qualname": "Sum", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Sqrt": {"fullname": "sqlglot.expressions.Sqrt", "modulename": "sqlglot.expressions", "qualname": "Sqrt", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Stddev": {"fullname": "sqlglot.expressions.Stddev", "modulename": "sqlglot.expressions", "qualname": "Stddev", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevPop": {"fullname": "sqlglot.expressions.StddevPop", "modulename": "sqlglot.expressions", "qualname": "StddevPop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.StddevSamp": {"fullname": "sqlglot.expressions.StddevSamp", "modulename": "sqlglot.expressions", "qualname": "StddevSamp", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.TimeToStr": {"fullname": "sqlglot.expressions.TimeToStr", "modulename": "sqlglot.expressions", "qualname": "TimeToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToTimeStr": {"fullname": "sqlglot.expressions.TimeToTimeStr", "modulename": "sqlglot.expressions", "qualname": "TimeToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeToUnix": {"fullname": "sqlglot.expressions.TimeToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToDate": {"fullname": "sqlglot.expressions.TimeStrToDate", "modulename": "sqlglot.expressions", "qualname": "TimeStrToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToTime": {"fullname": "sqlglot.expressions.TimeStrToTime", "modulename": "sqlglot.expressions", "qualname": "TimeStrToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TimeStrToUnix": {"fullname": "sqlglot.expressions.TimeStrToUnix", "modulename": "sqlglot.expressions", "qualname": "TimeStrToUnix", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Trim": {"fullname": "sqlglot.expressions.Trim", "modulename": "sqlglot.expressions", "qualname": "Trim", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsAdd": {"fullname": "sqlglot.expressions.TsOrDsAdd", "modulename": "sqlglot.expressions", "qualname": "TsOrDsAdd", "kind": "class", "doc": "

\n", "bases": "Func, TimeUnit"}, "sqlglot.expressions.TsOrDsToDateStr": {"fullname": "sqlglot.expressions.TsOrDsToDateStr", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDateStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDsToDate": {"fullname": "sqlglot.expressions.TsOrDsToDate", "modulename": "sqlglot.expressions", "qualname": "TsOrDsToDate", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.TsOrDiToDi": {"fullname": "sqlglot.expressions.TsOrDiToDi", "modulename": "sqlglot.expressions", "qualname": "TsOrDiToDi", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Unhex": {"fullname": "sqlglot.expressions.Unhex", "modulename": "sqlglot.expressions", "qualname": "Unhex", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToStr": {"fullname": "sqlglot.expressions.UnixToStr", "modulename": "sqlglot.expressions", "qualname": "UnixToStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTime": {"fullname": "sqlglot.expressions.UnixToTime", "modulename": "sqlglot.expressions", "qualname": "UnixToTime", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.UnixToTimeStr": {"fullname": "sqlglot.expressions.UnixToTimeStr", "modulename": "sqlglot.expressions", "qualname": "UnixToTimeStr", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Upper": {"fullname": "sqlglot.expressions.Upper", "modulename": "sqlglot.expressions", "qualname": "Upper", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Variance": {"fullname": "sqlglot.expressions.Variance", "modulename": "sqlglot.expressions", "qualname": "Variance", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.VariancePop": {"fullname": "sqlglot.expressions.VariancePop", "modulename": "sqlglot.expressions", "qualname": "VariancePop", "kind": "class", "doc": "

\n", "bases": "AggFunc"}, "sqlglot.expressions.Week": {"fullname": "sqlglot.expressions.Week", "modulename": "sqlglot.expressions", "qualname": "Week", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Year": {"fullname": "sqlglot.expressions.Year", "modulename": "sqlglot.expressions", "qualname": "Year", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.Use": {"fullname": "sqlglot.expressions.Use", "modulename": "sqlglot.expressions", "qualname": "Use", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.Merge": {"fullname": "sqlglot.expressions.Merge", "modulename": "sqlglot.expressions", "qualname": "Merge", "kind": "class", "doc": "

\n", "bases": "Expression"}, "sqlglot.expressions.When": {"fullname": "sqlglot.expressions.When", "modulename": "sqlglot.expressions", "qualname": "When", "kind": "class", "doc": "

\n", "bases": "Func"}, "sqlglot.expressions.maybe_parse": {"fullname": "sqlglot.expressions.maybe_parse", "modulename": "sqlglot.expressions", "qualname": "maybe_parse", "kind": "function", "doc": "

Gracefully handle a possible string or expression.

\n\n
Example:
\n\n
\n
\n
>>> maybe_parse("1")\n(LITERAL this: 1, is_string: False)\n>>> maybe_parse(to_identifier("x"))\n(IDENTIFIER this: x, quoted: False)\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • sql_or_expression: the SQL code string or an expression
  • \n
  • into: the SQLGlot Expression to parse into
  • \n
  • dialect: the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • prefix: a string to prefix the sql with before it gets parsed\n(automatically includes a space)
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Expression: the parsed or given expression.

\n
\n", "signature": "(\tsql_or_expression: str | sqlglot.expressions.Expression,\t*,\tinto: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\tprefix: Optional[str] = None,\t**opts) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.union": {"fullname": "sqlglot.expressions.union", "modulename": "sqlglot.expressions", "qualname": "union", "kind": "function", "doc": "

Initializes a syntax tree from one UNION expression.

\n\n
Example:
\n\n
\n
\n
>>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo UNION SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left (str | Expression): the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right (str | Expression): the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Union: the syntax tree for the UNION expression.

\n
\n", "signature": "(left, right, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.intersect": {"fullname": "sqlglot.expressions.intersect", "modulename": "sqlglot.expressions", "qualname": "intersect", "kind": "function", "doc": "

Initializes a syntax tree from one INTERSECT expression.

\n\n
Example:
\n\n
\n
\n
>>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo INTERSECT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left (str | Expression): the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right (str | Expression): the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Intersect: the syntax tree for the INTERSECT expression.

\n
\n", "signature": "(left, right, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.except_": {"fullname": "sqlglot.expressions.except_", "modulename": "sqlglot.expressions", "qualname": "except_", "kind": "function", "doc": "

Initializes a syntax tree from one EXCEPT expression.

\n\n
Example:
\n\n
\n
\n
>>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()\n'SELECT * FROM foo EXCEPT SELECT * FROM bla'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • left (str | Expression): the SQL code string corresponding to the left-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • right (str | Expression): the SQL code string corresponding to the right-hand side.\nIf an Expression instance is passed, it will be used as-is.
  • \n
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • opts (kwargs): other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Except: the syntax tree for the EXCEPT statement.

\n
\n", "signature": "(left, right, distinct=True, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.select": {"fullname": "sqlglot.expressions.select", "modulename": "sqlglot.expressions", "qualname": "select", "kind": "function", "doc": "

Initializes a syntax tree from one or multiple SELECT expressions.

\n\n
Example:
\n\n
\n
\n
>>> select("col1", "col2").from_("tbl").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code string to parse as the expressions of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expressions (in the case that an\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat an input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(*expressions, dialect=None, **opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.from_": {"fullname": "sqlglot.expressions.from_", "modulename": "sqlglot.expressions", "qualname": "from_", "kind": "function", "doc": "

Initializes a syntax tree from a FROM expression.

\n\n
Example:
\n\n
\n
\n
>>> from_("tbl").select("col1", "col2").sql()\n'SELECT col1, col2 FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code string to parse as the FROM expressions of a\nSELECT statement. If an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Select: the syntax tree for the SELECT statement.

\n
\n", "signature": "(*expressions, dialect=None, **opts) -> sqlglot.expressions.Select:", "funcdef": "def"}, "sqlglot.expressions.update": {"fullname": "sqlglot.expressions.update", "modulename": "sqlglot.expressions", "qualname": "update", "kind": "function", "doc": "

Creates an update statement.

\n\n
Example:
\n\n
\n
\n
>>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()\n"UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *properties (Dict[str, Any]): dictionary of properties to set which are\nauto converted to sql objects eg None -> NULL
  • \n
  • where (str): sql conditional parsed into a WHERE statement
  • \n
  • from_ (str): sql statement parsed into a FROM statement
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Update: the syntax tree for the UPDATE statement.

\n
\n", "signature": "(\ttable,\tproperties,\twhere=None,\tfrom_=None,\tdialect=None,\t**opts) -> sqlglot.expressions.Update:", "funcdef": "def"}, "sqlglot.expressions.delete": {"fullname": "sqlglot.expressions.delete", "modulename": "sqlglot.expressions", "qualname": "delete", "kind": "function", "doc": "

Builds a delete statement.

\n\n
Example:
\n\n
\n
\n
>>> delete("my_table", where="id > 1").sql()\n'DELETE FROM my_table WHERE id > 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • where (str|Condition): sql conditional parsed into a WHERE statement
  • \n
  • dialect (str): the dialect used to parse the input expressions.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Delete: the syntax tree for the DELETE statement.

\n
\n", "signature": "(table, where=None, dialect=None, **opts) -> sqlglot.expressions.Delete:", "funcdef": "def"}, "sqlglot.expressions.condition": {"fullname": "sqlglot.expressions.condition", "modulename": "sqlglot.expressions", "qualname": "condition", "kind": "function", "doc": "

Initialize a logical condition expression.

\n\n
Example:
\n\n
\n
\n
>>> condition("x=1").sql()\n'x = 1'\n
\n
\n \n

This is helpful for composing larger logical syntax trees:

\n \n
\n
>>> where = condition("x=1")\n>>> where = where.and_("y=1")\n>>> Select().from_("tbl").select("*").where(where).sql()\n'SELECT * FROM tbl WHERE x = 1 AND y = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expression (str | Expression): the SQL code string to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression (in the case that the\ninput expression is a SQL string).
  • \n
  • **opts: other options to use to parse the input expressions (again, in the case\nthat the input expression is a SQL string).
  • \n
\n\n
Returns:
\n\n
\n

Condition: the expression

\n
\n", "signature": "(expression, dialect=None, **opts) -> sqlglot.expressions.Condition:", "funcdef": "def"}, "sqlglot.expressions.and_": {"fullname": "sqlglot.expressions.and_", "modulename": "sqlglot.expressions", "qualname": "and_", "kind": "function", "doc": "

Combine multiple conditions with an AND logical operator.

\n\n
Example:
\n\n
\n
\n
>>> and_("x=1", and_("y=1", "z=1")).sql()\n'x = 1 AND (y = 1 AND z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

And: the new condition

\n
\n", "signature": "(*expressions, dialect=None, **opts) -> sqlglot.expressions.And:", "funcdef": "def"}, "sqlglot.expressions.or_": {"fullname": "sqlglot.expressions.or_", "modulename": "sqlglot.expressions", "qualname": "or_", "kind": "function", "doc": "

Combine multiple conditions with an OR logical operator.

\n\n
Example:
\n\n
\n
\n
>>> or_("x=1", or_("y=1", "z=1")).sql()\n'x = 1 OR (y = 1 OR z = 1)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • *expressions (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Or: the new condition

\n
\n", "signature": "(*expressions, dialect=None, **opts) -> sqlglot.expressions.Or:", "funcdef": "def"}, "sqlglot.expressions.not_": {"fullname": "sqlglot.expressions.not_", "modulename": "sqlglot.expressions", "qualname": "not_", "kind": "function", "doc": "

Wrap a condition with a NOT operator.

\n\n
Example:
\n\n
\n
\n
>>> not_("this_suit='black'").sql()\n"NOT this_suit = 'black'"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Not: the new condition

\n
\n", "signature": "(expression, dialect=None, **opts) -> sqlglot.expressions.Not:", "funcdef": "def"}, "sqlglot.expressions.paren": {"fullname": "sqlglot.expressions.paren", "modulename": "sqlglot.expressions", "qualname": "paren", "kind": "function", "doc": "

\n", "signature": "(expression) -> sqlglot.expressions.Paren:", "funcdef": "def"}, "sqlglot.expressions.to_identifier": {"fullname": "sqlglot.expressions.to_identifier", "modulename": "sqlglot.expressions", "qualname": "to_identifier", "kind": "function", "doc": "

Builds an identifier.

\n\n
Arguments:
\n\n
    \n
  • name: The name to turn into an identifier.
  • \n
  • quoted: Whether or not force quote the identifier.
  • \n
\n\n
Returns:
\n\n
\n

The identifier ast node.

\n
\n", "signature": "(name, quoted=None):", "funcdef": "def"}, "sqlglot.expressions.to_interval": {"fullname": "sqlglot.expressions.to_interval", "modulename": "sqlglot.expressions", "qualname": "to_interval", "kind": "function", "doc": "

Builds an interval expression from a string like '1 day' or '5 months'.

\n", "signature": "(\tinterval: str | sqlglot.expressions.Literal) -> sqlglot.expressions.Interval:", "funcdef": "def"}, "sqlglot.expressions.to_table": {"fullname": "sqlglot.expressions.to_table", "modulename": "sqlglot.expressions", "qualname": "to_table", "kind": "function", "doc": "

Create a table expression from a [catalog].[schema].[table] sql path. Catalog and schema are optional.\nIf a table is passed in then that table is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: a [catalog].[schema].[table] string.
  • \n
\n\n
Returns:
\n\n
\n

A table expression.

\n
\n", "signature": "(\tsql_path: Union[str, sqlglot.expressions.Table, NoneType],\t**kwargs) -> Optional[sqlglot.expressions.Table]:", "funcdef": "def"}, "sqlglot.expressions.to_column": {"fullname": "sqlglot.expressions.to_column", "modulename": "sqlglot.expressions", "qualname": "to_column", "kind": "function", "doc": "

Create a column from a [table].[column] sql path. Schema is optional.

\n\n

If a column is passed in then that column is returned.

\n\n
Arguments:
\n\n
    \n
  • sql_path: [table].[column] string
  • \n
\n\n
Returns:
\n\n
\n

Column: A column expression

\n
\n", "signature": "(\tsql_path: str | sqlglot.expressions.Column,\t**kwargs) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.alias_": {"fullname": "sqlglot.expressions.alias_", "modulename": "sqlglot.expressions", "qualname": "alias_", "kind": "function", "doc": "

Create an Alias expression.

\n\n
Example:
\n\n
\n
\n
>>> alias_('foo', 'bar').sql()\n'foo AS bar'\n
\n
\n \n
\n
>>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()\n'(SELECT 1, 2) AS bar(a, b)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias: the alias name to use. If the name has\nspecial characters it is quoted.
  • \n
  • table: Whether or not to create a table alias, can also be a list of columns.
  • \n
  • quoted: whether or not to quote the alias
  • \n
  • dialect: the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Alias: the aliased expression

\n
\n", "signature": "(\texpression: str | sqlglot.expressions.Expression,\talias: str | sqlglot.expressions.Identifier,\ttable: Union[bool, Sequence[str | sqlglot.expressions.Identifier]] = False,\tquoted: Optional[bool] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**opts):", "funcdef": "def"}, "sqlglot.expressions.subquery": {"fullname": "sqlglot.expressions.subquery", "modulename": "sqlglot.expressions", "qualname": "subquery", "kind": "function", "doc": "

Build a subquery expression.

\n\n
Example:
\n\n
\n
\n
>>> subquery('select x from tbl', 'bar').select('x').sql()\n'SELECT x FROM (SELECT x FROM tbl) AS bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (str | Expression): the SQL code strings to parse.\nIf an Expression instance is passed, this is used as-is.
  • \n
  • alias (str | Expression): the alias name to use.
  • \n
  • dialect (str): the dialect used to parse the input expression.
  • \n
  • **opts: other options to use to parse the input expressions.
  • \n
\n\n
Returns:
\n\n
\n

Select: a new select with the subquery expression included

\n
\n", "signature": "(expression, alias=None, dialect=None, **opts):", "funcdef": "def"}, "sqlglot.expressions.column": {"fullname": "sqlglot.expressions.column", "modulename": "sqlglot.expressions", "qualname": "column", "kind": "function", "doc": "

Build a Column.

\n\n
Arguments:
\n\n
    \n
  • col (str | Expression): column name
  • \n
  • table (str | Expression): table name
  • \n
\n\n
Returns:
\n\n
\n

Column: column instance

\n
\n", "signature": "(col, table=None, quoted=None) -> sqlglot.expressions.Column:", "funcdef": "def"}, "sqlglot.expressions.cast": {"fullname": "sqlglot.expressions.cast", "modulename": "sqlglot.expressions", "qualname": "cast", "kind": "function", "doc": "

Cast an expression to a data type.

\n\n
Example:
\n\n
\n
\n
>>> cast('x + 1', 'int').sql()\n'CAST(x + 1 AS INT)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to cast.
  • \n
  • to: The datatype to cast to.
  • \n
\n\n
Returns:
\n\n
\n

A cast node.

\n
\n", "signature": "(\texpression: str | sqlglot.expressions.Expression,\tto: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type,\t**opts) -> sqlglot.expressions.Cast:", "funcdef": "def"}, "sqlglot.expressions.table_": {"fullname": "sqlglot.expressions.table_", "modulename": "sqlglot.expressions", "qualname": "table_", "kind": "function", "doc": "

Build a Table.

\n\n
Arguments:
\n\n
    \n
  • table (str | Expression): table name
  • \n
  • db (str | Expression): db name
  • \n
  • catalog (str | Expression): catalog name
  • \n
\n\n
Returns:
\n\n
\n

Table: table instance

\n
\n", "signature": "(\ttable,\tdb=None,\tcatalog=None,\tquoted=None,\talias=None) -> sqlglot.expressions.Table:", "funcdef": "def"}, "sqlglot.expressions.values": {"fullname": "sqlglot.expressions.values", "modulename": "sqlglot.expressions", "qualname": "values", "kind": "function", "doc": "

Build VALUES statement.

\n\n
Example:
\n\n
\n
\n
>>> values([(1, '2')]).sql()\n"VALUES (1, '2')"\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: values statements that will be converted to SQL
  • \n
  • alias: optional alias
  • \n
  • columns: Optional list of ordered column names or ordered dictionary of column names to types.\nIf either are provided then an alias is also required.\nIf a dictionary is provided then the first column of the values will be casted to the expected type\nin order to help with type inference.
  • \n
\n\n
Returns:
\n\n
\n

Values: the Values expression object

\n
\n", "signature": "(\tvalues: Iterable[Tuple[Any, ...]],\talias: Optional[str] = None,\tcolumns: Union[Iterable[str], Dict[str, sqlglot.expressions.DataType], NoneType] = None) -> sqlglot.expressions.Values:", "funcdef": "def"}, "sqlglot.expressions.rename_table": {"fullname": "sqlglot.expressions.rename_table", "modulename": "sqlglot.expressions", "qualname": "rename_table", "kind": "function", "doc": "

Build ALTER TABLE... RENAME... expression

\n\n
Arguments:
\n\n
    \n
  • old_name: The old name of the table
  • \n
  • new_name: The new name of the table
  • \n
\n\n
Returns:
\n\n
\n

Alter table expression

\n
\n", "signature": "(\told_name: str | sqlglot.expressions.Table,\tnew_name: str | sqlglot.expressions.Table) -> sqlglot.expressions.AlterTable:", "funcdef": "def"}, "sqlglot.expressions.convert": {"fullname": "sqlglot.expressions.convert", "modulename": "sqlglot.expressions", "qualname": "convert", "kind": "function", "doc": "

Convert a python value into an expression object.

\n\n

Raises an error if a conversion is not possible.

\n\n
Arguments:
\n\n
    \n
  • value (Any): a python object
  • \n
\n\n
Returns:
\n\n
\n

Expression: the equivalent expression object

\n
\n", "signature": "(value) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.replace_children": {"fullname": "sqlglot.expressions.replace_children", "modulename": "sqlglot.expressions", "qualname": "replace_children", "kind": "function", "doc": "

Replace children of an expression with the result of a lambda fun(child) -> exp.

\n", "signature": "(expression, fun):", "funcdef": "def"}, "sqlglot.expressions.column_table_names": {"fullname": "sqlglot.expressions.column_table_names", "modulename": "sqlglot.expressions", "qualname": "column_table_names", "kind": "function", "doc": "

Return all table names referenced through columns in an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))\n['c', 'a']\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to find table names
  • \n
\n\n
Returns:
\n\n
\n

list: A list of unique names

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.expressions.table_name": {"fullname": "sqlglot.expressions.table_name", "modulename": "sqlglot.expressions", "qualname": "table_name", "kind": "function", "doc": "

Get the full name of a table as a string.

\n\n
Arguments:
\n\n
    \n
  • table (exp.Table | str): table expression node or string.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> table_name(parse_one("select * from a.b.c").find(exp.Table))\n'a.b.c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The table name.

\n
\n", "signature": "(table) -> str:", "funcdef": "def"}, "sqlglot.expressions.replace_tables": {"fullname": "sqlglot.expressions.replace_tables", "modulename": "sqlglot.expressions", "qualname": "replace_tables", "kind": "function", "doc": "

Replace all tables in expression according to the mapping.

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression node to be transformed and replaced.
  • \n
  • mapping (Dict[str, str]): mapping of table names.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()\n'SELECT * FROM c'\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(expression, mapping):", "funcdef": "def"}, "sqlglot.expressions.replace_placeholders": {"fullname": "sqlglot.expressions.replace_placeholders", "modulename": "sqlglot.expressions", "qualname": "replace_placeholders", "kind": "function", "doc": "

Replace placeholders in an expression.

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression node to be transformed and replaced.
  • \n
  • args: positional names that will substitute unnamed placeholders in the given order.
  • \n
  • kwargs: keyword arguments that will substitute named placeholders.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import exp, parse_one\n>>> replace_placeholders(\n...     parse_one("select * from :tbl where ? = ?"), "a", "b", tbl="foo"\n... ).sql()\n'SELECT * FROM foo WHERE a = b'\n
\n
\n
\n\n
Returns:
\n\n
\n

The mapped expression.

\n
\n", "signature": "(expression, *args, **kwargs):", "funcdef": "def"}, "sqlglot.expressions.expand": {"fullname": "sqlglot.expressions.expand", "modulename": "sqlglot.expressions", "qualname": "expand", "kind": "function", "doc": "

Transforms an expression by expanding all referenced sources into subqueries.

\n\n
Examples:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()\n'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: The expression to expand.
  • \n
  • sources: A dictionary of name to Subqueryables.
  • \n
  • copy: Whether or not to copy the expression during transformation. Defaults to True.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression,\tsources: Dict[str, sqlglot.expressions.Subqueryable],\tcopy=True) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.expressions.func": {"fullname": "sqlglot.expressions.func", "modulename": "sqlglot.expressions", "qualname": "func", "kind": "function", "doc": "

Returns a Func expression.

\n\n
Examples:
\n\n
\n
\n
>>> func("abs", 5).sql()\n'ABS(5)'\n
\n
\n \n
\n
>>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()\n'CAST(5 AS DOUBLE)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • name: the name of the function to build.
  • \n
  • args: the args used to instantiate the function of interest.
  • \n
  • dialect: the source dialect.
  • \n
  • kwargs: the kwargs used to instantiate the function of interest.
  • \n
\n\n
Note:
\n\n
\n

The arguments args and kwargs are mutually exclusive.

\n
\n\n
Returns:
\n\n
\n

An instance of the function of interest, or an anonymous function, if name doesn't\n correspond to an existing sqlglot.expressions.Func class.

\n
\n", "signature": "(\tname: str,\t*args,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\t**kwargs) -> sqlglot.expressions.Func:", "funcdef": "def"}, "sqlglot.expressions.true": {"fullname": "sqlglot.expressions.true", "modulename": "sqlglot.expressions", "qualname": "true", "kind": "function", "doc": "

Returns a true Boolean expression.

\n", "signature": "():", "funcdef": "def"}, "sqlglot.expressions.false": {"fullname": "sqlglot.expressions.false", "modulename": "sqlglot.expressions", "qualname": "false", "kind": "function", "doc": "

Returns a false Boolean expression.

\n", "signature": "():", "funcdef": "def"}, "sqlglot.expressions.null": {"fullname": "sqlglot.expressions.null", "modulename": "sqlglot.expressions", "qualname": "null", "kind": "function", "doc": "

Returns a Null expression.

\n", "signature": "():", "funcdef": "def"}, "sqlglot.generator": {"fullname": "sqlglot.generator", "modulename": "sqlglot.generator", "kind": "module", "doc": "

\n"}, "sqlglot.generator.Generator": {"fullname": "sqlglot.generator.Generator", "modulename": "sqlglot.generator", "qualname": "Generator", "kind": "class", "doc": "

Generator interprets the given syntax tree and produces a SQL string as an output.

\n\n
Arguments:
\n\n
    \n
  • time_mapping (dict): the dictionary of custom time mappings in which the key\nrepresents a python time format and the output the target time format
  • \n
  • time_trie (trie): a trie of the time_mapping keys
  • \n
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • \n
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • \n
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • \n
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: \".
  • \n
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: \".
  • \n
  • identify (bool): if set to True all identifiers will be delimited by the corresponding\ncharacter.
  • \n
  • normalize (bool): if set to True all identifiers will be lower cased
  • \n
  • string_escape (str): specifies a string escape character. Default: '.
  • \n
  • identifier_escape (str): specifies an identifier escape character. Default: \".
  • \n
  • pad (int): determines padding in a formatted string. Default: 2.
  • \n
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • \n
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • \n
  • normalize_functions (str): normalize function names, \"upper\", \"lower\", or None\nDefault: \"upper\"
  • \n
  • alias_post_tablesample (bool): if the table alias comes after tablesample\nDefault: False
  • \n
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters\nunsupported expressions. Default ErrorLevel.WARN.
  • \n
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.\nThis is only relevant if unsupported_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • leading_comma (bool): if the comma is leading or trailing in select statements\nDefault: False
  • \n
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode.\nThe default is on the smaller end because the length only represents a segment and not the true\nline length.\nDefault: 80
  • \n
  • comments: Whether or not to preserve comments in the output SQL code.\nDefault: True
  • \n
\n"}, "sqlglot.generator.Generator.__init__": {"fullname": "sqlglot.generator.Generator.__init__", "modulename": "sqlglot.generator", "qualname": "Generator.__init__", "kind": "function", "doc": "

\n", "signature": "(\ttime_mapping=None,\ttime_trie=None,\tpretty=None,\tquote_start=None,\tquote_end=None,\tidentifier_start=None,\tidentifier_end=None,\tidentify=False,\tnormalize=False,\tstring_escape=None,\tidentifier_escape=None,\tpad=2,\tindent=2,\tindex_offset=0,\tunnest_column_only=False,\talias_post_tablesample=False,\tnormalize_functions='upper',\tunsupported_level=<ErrorLevel.WARN: 'WARN'>,\tnull_ordering=None,\tmax_unsupported=3,\tleading_comma=False,\tmax_text_width=80,\tcomments=True)"}, "sqlglot.generator.Generator.generate": {"fullname": "sqlglot.generator.Generator.generate", "modulename": "sqlglot.generator", "qualname": "Generator.generate", "kind": "function", "doc": "

Generates a SQL string by interpreting the given syntax tree.

\n\n

Args\n expression: the syntax tree.

\n\n

Returns\n the SQL string.

\n", "signature": "(self, expression: Optional[sqlglot.expressions.Expression]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unsupported": {"fullname": "sqlglot.generator.Generator.unsupported", "modulename": "sqlglot.generator", "qualname": "Generator.unsupported", "kind": "function", "doc": "

\n", "signature": "(self, message: str) -> None:", "funcdef": "def"}, "sqlglot.generator.Generator.sep": {"fullname": "sqlglot.generator.Generator.sep", "modulename": "sqlglot.generator", "qualname": "Generator.sep", "kind": "function", "doc": "

\n", "signature": "(self, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.seg": {"fullname": "sqlglot.generator.Generator.seg", "modulename": "sqlglot.generator", "qualname": "Generator.seg", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, sep: str = ' ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pad_comment": {"fullname": "sqlglot.generator.Generator.pad_comment", "modulename": "sqlglot.generator", "qualname": "Generator.pad_comment", "kind": "function", "doc": "

\n", "signature": "(self, comment: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.maybe_comment": {"fullname": "sqlglot.generator.Generator.maybe_comment", "modulename": "sqlglot.generator", "qualname": "Generator.maybe_comment", "kind": "function", "doc": "

\n", "signature": "(self, sql: str, expression: sqlglot.expressions.Expression) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.wrap": {"fullname": "sqlglot.generator.Generator.wrap", "modulename": "sqlglot.generator", "qualname": "Generator.wrap", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression | str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.no_identify": {"fullname": "sqlglot.generator.Generator.no_identify", "modulename": "sqlglot.generator", "qualname": "Generator.no_identify", "kind": "function", "doc": "

\n", "signature": "(self, func: Callable[..., str], *args, **kwargs) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.normalize_func": {"fullname": "sqlglot.generator.Generator.normalize_func", "modulename": "sqlglot.generator", "qualname": "Generator.normalize_func", "kind": "function", "doc": "

\n", "signature": "(self, name: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.indent": {"fullname": "sqlglot.generator.Generator.indent", "modulename": "sqlglot.generator", "qualname": "Generator.indent", "kind": "function", "doc": "

\n", "signature": "(\tself,\tsql: str,\tlevel: int = 0,\tpad: Optional[int] = None,\tskip_first: bool = False,\tskip_last: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sql": {"fullname": "sqlglot.generator.Generator.sql", "modulename": "sqlglot.generator", "qualname": "Generator.sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: Union[str, sqlglot.expressions.Expression, NoneType],\tkey: Optional[str] = None,\tcomment: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uncache_sql": {"fullname": "sqlglot.generator.Generator.uncache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uncache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Uncache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cache_sql": {"fullname": "sqlglot.generator.Generator.cache_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cache_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cache) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.characterset_sql": {"fullname": "sqlglot.generator.Generator.characterset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.characterset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CharacterSet) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.column_sql": {"fullname": "sqlglot.generator.Generator.column_sql", "modulename": "sqlglot.generator", "qualname": "Generator.column_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Column) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columndef_sql": {"fullname": "sqlglot.generator.Generator.columndef_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columndef_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnDef) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.columnconstraint_sql": {"fullname": "sqlglot.generator.Generator.columnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.columnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.autoincrementcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, _) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.checkcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.checkcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CheckColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.commentcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.commentcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CommentColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.collatecolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.collatecolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CollateColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.encodecolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.encodecolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.EncodeColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.defaultcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.defaultcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DefaultColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.generatedasidentitycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.notnullcolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.notnullcolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NotNullColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.primarykeycolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykeycolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PrimaryKeyColumnConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"fullname": "sqlglot.generator.Generator.uniquecolumnconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.uniquecolumnconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, _) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.create_sql": {"fullname": "sqlglot.generator.Generator.create_sql", "modulename": "sqlglot.generator", "qualname": "Generator.create_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Create) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.describe_sql": {"fullname": "sqlglot.generator.Generator.describe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.describe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Describe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.prepend_ctes": {"fullname": "sqlglot.generator.Generator.prepend_ctes", "modulename": "sqlglot.generator", "qualname": "Generator.prepend_ctes", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, sql: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_sql": {"fullname": "sqlglot.generator.Generator.with_sql", "modulename": "sqlglot.generator", "qualname": "Generator.with_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.With) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cte_sql": {"fullname": "sqlglot.generator.Generator.cte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablealias_sql": {"fullname": "sqlglot.generator.Generator.tablealias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablealias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TableAlias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitstring_sql": {"fullname": "sqlglot.generator.Generator.bitstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hexstring_sql": {"fullname": "sqlglot.generator.Generator.hexstring_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hexstring_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.HexString) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datatype_sql": {"fullname": "sqlglot.generator.Generator.datatype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datatype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.directory_sql": {"fullname": "sqlglot.generator.Generator.directory_sql", "modulename": "sqlglot.generator", "qualname": "Generator.directory_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Directory) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.delete_sql": {"fullname": "sqlglot.generator.Generator.delete_sql", "modulename": "sqlglot.generator", "qualname": "Generator.delete_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Delete) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.drop_sql": {"fullname": "sqlglot.generator.Generator.drop_sql", "modulename": "sqlglot.generator", "qualname": "Generator.drop_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Drop) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_sql": {"fullname": "sqlglot.generator.Generator.except_sql", "modulename": "sqlglot.generator", "qualname": "Generator.except_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.except_op": {"fullname": "sqlglot.generator.Generator.except_op", "modulename": "sqlglot.generator", "qualname": "Generator.except_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Except) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fetch_sql": {"fullname": "sqlglot.generator.Generator.fetch_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fetch_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Fetch) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.filter_sql": {"fullname": "sqlglot.generator.Generator.filter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.filter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Filter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.hint_sql": {"fullname": "sqlglot.generator.Generator.hint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.hint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Hint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.index_sql": {"fullname": "sqlglot.generator.Generator.index_sql", "modulename": "sqlglot.generator", "qualname": "Generator.index_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Index) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.identifier_sql": {"fullname": "sqlglot.generator.Generator.identifier_sql", "modulename": "sqlglot.generator", "qualname": "Generator.identifier_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Identifier) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.national_sql": {"fullname": "sqlglot.generator.Generator.national_sql", "modulename": "sqlglot.generator", "qualname": "Generator.national_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.National) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_sql": {"fullname": "sqlglot.generator.Generator.partition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Partition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties_sql": {"fullname": "sqlglot.generator.Generator.properties_sql", "modulename": "sqlglot.generator", "qualname": "Generator.properties_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.root_properties": {"fullname": "sqlglot.generator.Generator.root_properties", "modulename": "sqlglot.generator", "qualname": "Generator.root_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.properties": {"fullname": "sqlglot.generator.Generator.properties", "modulename": "sqlglot.generator", "qualname": "Generator.properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties,\tprefix: str = '',\tsep: str = ', ',\tsuffix: str = '',\twrapped: bool = True) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.with_properties": {"fullname": "sqlglot.generator.Generator.with_properties", "modulename": "sqlglot.generator", "qualname": "Generator.with_properties", "kind": "function", "doc": "

\n", "signature": "(self, properties: sqlglot.expressions.Properties) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.locate_properties": {"fullname": "sqlglot.generator.Generator.locate_properties", "modulename": "sqlglot.generator", "qualname": "Generator.locate_properties", "kind": "function", "doc": "

\n", "signature": "(\tself,\tproperties: sqlglot.expressions.Properties) -> Dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]:", "funcdef": "def"}, "sqlglot.generator.Generator.property_sql": {"fullname": "sqlglot.generator.Generator.property_sql", "modulename": "sqlglot.generator", "qualname": "Generator.property_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.likeproperty_sql": {"fullname": "sqlglot.generator.Generator.likeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.likeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LikeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.fallbackproperty_sql": {"fullname": "sqlglot.generator.Generator.fallbackproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.fallbackproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FallbackProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.journalproperty_sql": {"fullname": "sqlglot.generator.Generator.journalproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.journalproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JournalProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.freespaceproperty_sql": {"fullname": "sqlglot.generator.Generator.freespaceproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.freespaceproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.FreespaceProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"fullname": "sqlglot.generator.Generator.afterjournalproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.afterjournalproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AfterJournalProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.checksumproperty_sql": {"fullname": "sqlglot.generator.Generator.checksumproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.checksumproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ChecksumProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"fullname": "sqlglot.generator.Generator.mergeblockratioproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mergeblockratioproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MergeBlockRatioProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"fullname": "sqlglot.generator.Generator.datablocksizeproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.datablocksizeproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DataBlocksizeProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"fullname": "sqlglot.generator.Generator.blockcompressionproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.blockcompressionproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BlockCompressionProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"fullname": "sqlglot.generator.Generator.isolatedloadingproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.isolatedloadingproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IsolatedLoadingProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.insert_sql": {"fullname": "sqlglot.generator.Generator.insert_sql", "modulename": "sqlglot.generator", "qualname": "Generator.insert_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Insert) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_sql": {"fullname": "sqlglot.generator.Generator.intersect_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intersect_op": {"fullname": "sqlglot.generator.Generator.intersect_op", "modulename": "sqlglot.generator", "qualname": "Generator.intersect_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Intersect) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.introducer_sql": {"fullname": "sqlglot.generator.Generator.introducer_sql", "modulename": "sqlglot.generator", "qualname": "Generator.introducer_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Introducer) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pseudotype_sql": {"fullname": "sqlglot.generator.Generator.pseudotype_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pseudotype_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.PseudoType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"fullname": "sqlglot.generator.Generator.rowformatdelimitedproperty_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rowformatdelimitedproperty_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RowFormatDelimitedProperty) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.table_sql": {"fullname": "sqlglot.generator.Generator.table_sql", "modulename": "sqlglot.generator", "qualname": "Generator.table_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Table, sep: str = ' AS ') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tablesample_sql": {"fullname": "sqlglot.generator.Generator.tablesample_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tablesample_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TableSample) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.pivot_sql": {"fullname": "sqlglot.generator.Generator.pivot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.pivot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Pivot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tuple_sql": {"fullname": "sqlglot.generator.Generator.tuple_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tuple_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tuple) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.update_sql": {"fullname": "sqlglot.generator.Generator.update_sql", "modulename": "sqlglot.generator", "qualname": "Generator.update_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Update) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.values_sql": {"fullname": "sqlglot.generator.Generator.values_sql", "modulename": "sqlglot.generator", "qualname": "Generator.values_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Values) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.var_sql": {"fullname": "sqlglot.generator.Generator.var_sql", "modulename": "sqlglot.generator", "qualname": "Generator.var_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Var) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.into_sql": {"fullname": "sqlglot.generator.Generator.into_sql", "modulename": "sqlglot.generator", "qualname": "Generator.into_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Into) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.from_sql": {"fullname": "sqlglot.generator.Generator.from_sql", "modulename": "sqlglot.generator", "qualname": "Generator.from_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.From) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.group_sql": {"fullname": "sqlglot.generator.Generator.group_sql", "modulename": "sqlglot.generator", "qualname": "Generator.group_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Group) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.having_sql": {"fullname": "sqlglot.generator.Generator.having_sql", "modulename": "sqlglot.generator", "qualname": "Generator.having_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Having) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.join_sql": {"fullname": "sqlglot.generator.Generator.join_sql", "modulename": "sqlglot.generator", "qualname": "Generator.join_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Join) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lambda_sql": {"fullname": "sqlglot.generator.Generator.lambda_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lambda_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Lambda,\tarrow_sep: str = '->') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lateral_sql": {"fullname": "sqlglot.generator.Generator.lateral_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lateral_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lateral) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.limit_sql": {"fullname": "sqlglot.generator.Generator.limit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.limit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Limit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.offset_sql": {"fullname": "sqlglot.generator.Generator.offset_sql", "modulename": "sqlglot.generator", "qualname": "Generator.offset_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Offset) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lock_sql": {"fullname": "sqlglot.generator.Generator.lock_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lock_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Lock) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.literal_sql": {"fullname": "sqlglot.generator.Generator.literal_sql", "modulename": "sqlglot.generator", "qualname": "Generator.literal_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Literal) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.loaddata_sql": {"fullname": "sqlglot.generator.Generator.loaddata_sql", "modulename": "sqlglot.generator", "qualname": "Generator.loaddata_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LoadData) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.null_sql": {"fullname": "sqlglot.generator.Generator.null_sql", "modulename": "sqlglot.generator", "qualname": "Generator.null_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.boolean_sql": {"fullname": "sqlglot.generator.Generator.boolean_sql", "modulename": "sqlglot.generator", "qualname": "Generator.boolean_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Boolean) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.order_sql": {"fullname": "sqlglot.generator.Generator.order_sql", "modulename": "sqlglot.generator", "qualname": "Generator.order_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Order, flat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cluster_sql": {"fullname": "sqlglot.generator.Generator.cluster_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cluster_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cluster) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distribute_sql": {"fullname": "sqlglot.generator.Generator.distribute_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distribute_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distribute) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sort_sql": {"fullname": "sqlglot.generator.Generator.sort_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sort_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sort) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ordered_sql": {"fullname": "sqlglot.generator.Generator.ordered_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ordered_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Ordered) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.matchrecognize_sql": {"fullname": "sqlglot.generator.Generator.matchrecognize_sql", "modulename": "sqlglot.generator", "qualname": "Generator.matchrecognize_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.query_modifiers": {"fullname": "sqlglot.generator.Generator.query_modifiers", "modulename": "sqlglot.generator", "qualname": "Generator.query_modifiers", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, *sqls: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.select_sql": {"fullname": "sqlglot.generator.Generator.select_sql", "modulename": "sqlglot.generator", "qualname": "Generator.select_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Select) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.schema_sql": {"fullname": "sqlglot.generator.Generator.schema_sql", "modulename": "sqlglot.generator", "qualname": "Generator.schema_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Schema) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.star_sql": {"fullname": "sqlglot.generator.Generator.star_sql", "modulename": "sqlglot.generator", "qualname": "Generator.star_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Star) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.structkwarg_sql": {"fullname": "sqlglot.generator.Generator.structkwarg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.structkwarg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.StructKwarg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.parameter_sql": {"fullname": "sqlglot.generator.Generator.parameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.parameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Parameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sessionparameter_sql": {"fullname": "sqlglot.generator.Generator.sessionparameter_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sessionparameter_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SessionParameter) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.placeholder_sql": {"fullname": "sqlglot.generator.Generator.placeholder_sql", "modulename": "sqlglot.generator", "qualname": "Generator.placeholder_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Placeholder) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.subquery_sql": {"fullname": "sqlglot.generator.Generator.subquery_sql", "modulename": "sqlglot.generator", "qualname": "Generator.subquery_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Subquery) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.qualify_sql": {"fullname": "sqlglot.generator.Generator.qualify_sql", "modulename": "sqlglot.generator", "qualname": "Generator.qualify_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Qualify) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_sql": {"fullname": "sqlglot.generator.Generator.union_sql", "modulename": "sqlglot.generator", "qualname": "Generator.union_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.union_op": {"fullname": "sqlglot.generator.Generator.union_op", "modulename": "sqlglot.generator", "qualname": "Generator.union_op", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Union) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unnest_sql": {"fullname": "sqlglot.generator.Generator.unnest_sql", "modulename": "sqlglot.generator", "qualname": "Generator.unnest_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.where_sql": {"fullname": "sqlglot.generator.Generator.where_sql", "modulename": "sqlglot.generator", "qualname": "Generator.where_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Where) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.window_sql": {"fullname": "sqlglot.generator.Generator.window_sql", "modulename": "sqlglot.generator", "qualname": "Generator.window_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Window) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.partition_by_sql": {"fullname": "sqlglot.generator.Generator.partition_by_sql", "modulename": "sqlglot.generator", "qualname": "Generator.partition_by_sql", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Window | sqlglot.expressions.MatchRecognize) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.window_spec_sql": {"fullname": "sqlglot.generator.Generator.window_spec_sql", "modulename": "sqlglot.generator", "qualname": "Generator.window_spec_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WindowSpec) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.withingroup_sql": {"fullname": "sqlglot.generator.Generator.withingroup_sql", "modulename": "sqlglot.generator", "qualname": "Generator.withingroup_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.WithinGroup) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.between_sql": {"fullname": "sqlglot.generator.Generator.between_sql", "modulename": "sqlglot.generator", "qualname": "Generator.between_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Between) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bracket_sql": {"fullname": "sqlglot.generator.Generator.bracket_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bracket_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Bracket) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.all_sql": {"fullname": "sqlglot.generator.Generator.all_sql", "modulename": "sqlglot.generator", "qualname": "Generator.all_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.All) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.any_sql": {"fullname": "sqlglot.generator.Generator.any_sql", "modulename": "sqlglot.generator", "qualname": "Generator.any_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Any) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.exists_sql": {"fullname": "sqlglot.generator.Generator.exists_sql", "modulename": "sqlglot.generator", "qualname": "Generator.exists_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Exists) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.case_sql": {"fullname": "sqlglot.generator.Generator.case_sql", "modulename": "sqlglot.generator", "qualname": "Generator.case_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Case) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.constraint_sql": {"fullname": "sqlglot.generator.Generator.constraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.constraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Constraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.extract_sql": {"fullname": "sqlglot.generator.Generator.extract_sql", "modulename": "sqlglot.generator", "qualname": "Generator.extract_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Extract) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trim_sql": {"fullname": "sqlglot.generator.Generator.trim_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trim_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Trim) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.concat_sql": {"fullname": "sqlglot.generator.Generator.concat_sql", "modulename": "sqlglot.generator", "qualname": "Generator.concat_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Concat) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.check_sql": {"fullname": "sqlglot.generator.Generator.check_sql", "modulename": "sqlglot.generator", "qualname": "Generator.check_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Check) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.foreignkey_sql": {"fullname": "sqlglot.generator.Generator.foreignkey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.foreignkey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.primarykey_sql": {"fullname": "sqlglot.generator.Generator.primarykey_sql", "modulename": "sqlglot.generator", "qualname": "Generator.primarykey_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ForeignKey) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.unique_sql": {"fullname": "sqlglot.generator.Generator.unique_sql", "modulename": "sqlglot.generator", "qualname": "Generator.unique_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Unique) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.if_sql": {"fullname": "sqlglot.generator.Generator.if_sql", "modulename": "sqlglot.generator", "qualname": "Generator.if_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.If) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_sql": {"fullname": "sqlglot.generator.Generator.in_sql", "modulename": "sqlglot.generator", "qualname": "Generator.in_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.In) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.in_unnest_op": {"fullname": "sqlglot.generator.Generator.in_unnest_op", "modulename": "sqlglot.generator", "qualname": "Generator.in_unnest_op", "kind": "function", "doc": "

\n", "signature": "(self, unnest: sqlglot.expressions.Unnest) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.interval_sql": {"fullname": "sqlglot.generator.Generator.interval_sql", "modulename": "sqlglot.generator", "qualname": "Generator.interval_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Interval) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.return_sql": {"fullname": "sqlglot.generator.Generator.return_sql", "modulename": "sqlglot.generator", "qualname": "Generator.return_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Return) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.reference_sql": {"fullname": "sqlglot.generator.Generator.reference_sql", "modulename": "sqlglot.generator", "qualname": "Generator.reference_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Reference) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.anonymous_sql": {"fullname": "sqlglot.generator.Generator.anonymous_sql", "modulename": "sqlglot.generator", "qualname": "Generator.anonymous_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Anonymous) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.paren_sql": {"fullname": "sqlglot.generator.Generator.paren_sql", "modulename": "sqlglot.generator", "qualname": "Generator.paren_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Paren) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neg_sql": {"fullname": "sqlglot.generator.Generator.neg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Neg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.not_sql": {"fullname": "sqlglot.generator.Generator.not_sql", "modulename": "sqlglot.generator", "qualname": "Generator.not_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Not) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.alias_sql": {"fullname": "sqlglot.generator.Generator.alias_sql", "modulename": "sqlglot.generator", "qualname": "Generator.alias_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Alias) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.aliases_sql": {"fullname": "sqlglot.generator.Generator.aliases_sql", "modulename": "sqlglot.generator", "qualname": "Generator.aliases_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Aliases) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.attimezone_sql": {"fullname": "sqlglot.generator.Generator.attimezone_sql", "modulename": "sqlglot.generator", "qualname": "Generator.attimezone_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AtTimeZone) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.add_sql": {"fullname": "sqlglot.generator.Generator.add_sql", "modulename": "sqlglot.generator", "qualname": "Generator.add_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Add) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.and_sql": {"fullname": "sqlglot.generator.Generator.and_sql", "modulename": "sqlglot.generator", "qualname": "Generator.and_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.And) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.connector_sql": {"fullname": "sqlglot.generator.Generator.connector_sql", "modulename": "sqlglot.generator", "qualname": "Generator.connector_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Connector, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseand_sql": {"fullname": "sqlglot.generator.Generator.bitwiseand_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseand_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseAnd) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiseleftshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseleftshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseLeftShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisenot_sql": {"fullname": "sqlglot.generator.Generator.bitwisenot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisenot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseNot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiseor_sql": {"fullname": "sqlglot.generator.Generator.bitwiseor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiseor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseOr) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"fullname": "sqlglot.generator.Generator.bitwiserightshift_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwiserightshift_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseRightShift) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.bitwisexor_sql": {"fullname": "sqlglot.generator.Generator.bitwisexor_sql", "modulename": "sqlglot.generator", "qualname": "Generator.bitwisexor_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.BitwiseXor) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.cast_sql": {"fullname": "sqlglot.generator.Generator.cast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.cast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Cast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.currentdate_sql": {"fullname": "sqlglot.generator.Generator.currentdate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.currentdate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.CurrentDate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.collate_sql": {"fullname": "sqlglot.generator.Generator.collate_sql", "modulename": "sqlglot.generator", "qualname": "Generator.collate_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Collate) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.command_sql": {"fullname": "sqlglot.generator.Generator.command_sql", "modulename": "sqlglot.generator", "qualname": "Generator.command_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Command) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.transaction_sql": {"fullname": "sqlglot.generator.Generator.transaction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.transaction_sql", "kind": "function", "doc": "

\n", "signature": "(self, *_) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.commit_sql": {"fullname": "sqlglot.generator.Generator.commit_sql", "modulename": "sqlglot.generator", "qualname": "Generator.commit_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Commit) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.rollback_sql": {"fullname": "sqlglot.generator.Generator.rollback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.rollback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Rollback) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altercolumn_sql": {"fullname": "sqlglot.generator.Generator.altercolumn_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altercolumn_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterColumn) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.renametable_sql": {"fullname": "sqlglot.generator.Generator.renametable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.renametable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RenameTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.altertable_sql": {"fullname": "sqlglot.generator.Generator.altertable_sql", "modulename": "sqlglot.generator", "qualname": "Generator.altertable_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AlterTable) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.droppartition_sql": {"fullname": "sqlglot.generator.Generator.droppartition_sql", "modulename": "sqlglot.generator", "qualname": "Generator.droppartition_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DropPartition) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.addconstraint_sql": {"fullname": "sqlglot.generator.Generator.addconstraint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.addconstraint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.AddConstraint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distinct_sql": {"fullname": "sqlglot.generator.Generator.distinct_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distinct_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distinct) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ignorenulls_sql": {"fullname": "sqlglot.generator.Generator.ignorenulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ignorenulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IgnoreNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.respectnulls_sql": {"fullname": "sqlglot.generator.Generator.respectnulls_sql", "modulename": "sqlglot.generator", "qualname": "Generator.respectnulls_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.RespectNulls) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.intdiv_sql": {"fullname": "sqlglot.generator.Generator.intdiv_sql", "modulename": "sqlglot.generator", "qualname": "Generator.intdiv_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.IntDiv) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dpipe_sql": {"fullname": "sqlglot.generator.Generator.dpipe_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dpipe_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.DPipe) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.div_sql": {"fullname": "sqlglot.generator.Generator.div_sql", "modulename": "sqlglot.generator", "qualname": "Generator.div_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Div) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.distance_sql": {"fullname": "sqlglot.generator.Generator.distance_sql", "modulename": "sqlglot.generator", "qualname": "Generator.distance_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Distance) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.dot_sql": {"fullname": "sqlglot.generator.Generator.dot_sql", "modulename": "sqlglot.generator", "qualname": "Generator.dot_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Dot) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.eq_sql": {"fullname": "sqlglot.generator.Generator.eq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.eq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.EQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.escape_sql": {"fullname": "sqlglot.generator.Generator.escape_sql", "modulename": "sqlglot.generator", "qualname": "Generator.escape_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Escape) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.glob_sql": {"fullname": "sqlglot.generator.Generator.glob_sql", "modulename": "sqlglot.generator", "qualname": "Generator.glob_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Glob) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gt_sql": {"fullname": "sqlglot.generator.Generator.gt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.gte_sql": {"fullname": "sqlglot.generator.Generator.gte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.gte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.GTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.ilike_sql": {"fullname": "sqlglot.generator.Generator.ilike_sql", "modulename": "sqlglot.generator", "qualname": "Generator.ilike_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.ILike) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.is_sql": {"fullname": "sqlglot.generator.Generator.is_sql", "modulename": "sqlglot.generator", "qualname": "Generator.is_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Is) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.like_sql": {"fullname": "sqlglot.generator.Generator.like_sql", "modulename": "sqlglot.generator", "qualname": "Generator.like_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Like) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.similarto_sql": {"fullname": "sqlglot.generator.Generator.similarto_sql", "modulename": "sqlglot.generator", "qualname": "Generator.similarto_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.SimilarTo) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lt_sql": {"fullname": "sqlglot.generator.Generator.lt_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lt_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LT) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.lte_sql": {"fullname": "sqlglot.generator.Generator.lte_sql", "modulename": "sqlglot.generator", "qualname": "Generator.lte_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.LTE) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mod_sql": {"fullname": "sqlglot.generator.Generator.mod_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mod_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mod) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.mul_sql": {"fullname": "sqlglot.generator.Generator.mul_sql", "modulename": "sqlglot.generator", "qualname": "Generator.mul_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Mul) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.neq_sql": {"fullname": "sqlglot.generator.Generator.neq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.neq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeeq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeeq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeeq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.nullsafeneq_sql": {"fullname": "sqlglot.generator.Generator.nullsafeneq_sql", "modulename": "sqlglot.generator", "qualname": "Generator.nullsafeneq_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.NullSafeNEQ) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.or_sql": {"fullname": "sqlglot.generator.Generator.or_sql", "modulename": "sqlglot.generator", "qualname": "Generator.or_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Or) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.slice_sql": {"fullname": "sqlglot.generator.Generator.slice_sql", "modulename": "sqlglot.generator", "qualname": "Generator.slice_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Slice) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.sub_sql": {"fullname": "sqlglot.generator.Generator.sub_sql", "modulename": "sqlglot.generator", "qualname": "Generator.sub_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Sub) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.trycast_sql": {"fullname": "sqlglot.generator.Generator.trycast_sql", "modulename": "sqlglot.generator", "qualname": "Generator.trycast_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.TryCast) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.use_sql": {"fullname": "sqlglot.generator.Generator.use_sql", "modulename": "sqlglot.generator", "qualname": "Generator.use_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Use) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.binary": {"fullname": "sqlglot.generator.Generator.binary", "modulename": "sqlglot.generator", "qualname": "Generator.binary", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Binary, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.function_fallback_sql": {"fullname": "sqlglot.generator.Generator.function_fallback_sql", "modulename": "sqlglot.generator", "qualname": "Generator.function_fallback_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Func) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.format_args": {"fullname": "sqlglot.generator.Generator.format_args", "modulename": "sqlglot.generator", "qualname": "Generator.format_args", "kind": "function", "doc": "

\n", "signature": "(self, *args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.text_width": {"fullname": "sqlglot.generator.Generator.text_width", "modulename": "sqlglot.generator", "qualname": "Generator.text_width", "kind": "function", "doc": "

\n", "signature": "(self, args: Iterable) -> int:", "funcdef": "def"}, "sqlglot.generator.Generator.format_time": {"fullname": "sqlglot.generator.Generator.format_time", "modulename": "sqlglot.generator", "qualname": "Generator.format_time", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression) -> Optional[str]:", "funcdef": "def"}, "sqlglot.generator.Generator.expressions": {"fullname": "sqlglot.generator.Generator.expressions", "modulename": "sqlglot.generator", "qualname": "Generator.expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\texpression: sqlglot.expressions.Expression,\tkey: Optional[str] = None,\tflat: bool = False,\tindent: bool = True,\tsep: str = ', ',\tprefix: str = '') -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.op_expressions": {"fullname": "sqlglot.generator.Generator.op_expressions", "modulename": "sqlglot.generator", "qualname": "Generator.op_expressions", "kind": "function", "doc": "

\n", "signature": "(\tself,\top: str,\texpression: sqlglot.expressions.Expression,\tflat: bool = False) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.naked_property": {"fullname": "sqlglot.generator.Generator.naked_property", "modulename": "sqlglot.generator", "qualname": "Generator.naked_property", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Property) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.set_operation": {"fullname": "sqlglot.generator.Generator.set_operation", "modulename": "sqlglot.generator", "qualname": "Generator.set_operation", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Expression, op: str) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.tag_sql": {"fullname": "sqlglot.generator.Generator.tag_sql", "modulename": "sqlglot.generator", "qualname": "Generator.tag_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Tag) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.token_sql": {"fullname": "sqlglot.generator.Generator.token_sql", "modulename": "sqlglot.generator", "qualname": "Generator.token_sql", "kind": "function", "doc": "

\n", "signature": "(self, token_type: sqlglot.tokens.TokenType) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"fullname": "sqlglot.generator.Generator.userdefinedfunction_sql", "modulename": "sqlglot.generator", "qualname": "Generator.userdefinedfunction_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UserDefinedFunction) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"fullname": "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.userdefinedfunctionkwarg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.UserDefinedFunctionKwarg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.joinhint_sql": {"fullname": "sqlglot.generator.Generator.joinhint_sql", "modulename": "sqlglot.generator", "qualname": "Generator.joinhint_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.JoinHint) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.kwarg_sql": {"fullname": "sqlglot.generator.Generator.kwarg_sql", "modulename": "sqlglot.generator", "qualname": "Generator.kwarg_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Kwarg) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.when_sql": {"fullname": "sqlglot.generator.Generator.when_sql", "modulename": "sqlglot.generator", "qualname": "Generator.when_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.When) -> str:", "funcdef": "def"}, "sqlglot.generator.Generator.merge_sql": {"fullname": "sqlglot.generator.Generator.merge_sql", "modulename": "sqlglot.generator", "qualname": "Generator.merge_sql", "kind": "function", "doc": "

\n", "signature": "(self, expression: sqlglot.expressions.Merge) -> str:", "funcdef": "def"}, "sqlglot.helper": {"fullname": "sqlglot.helper", "modulename": "sqlglot.helper", "kind": "module", "doc": "

\n"}, "sqlglot.helper.AutoName": {"fullname": "sqlglot.helper.AutoName", "modulename": "sqlglot.helper", "qualname": "AutoName", "kind": "class", "doc": "

This is used for creating enum classes where auto() is the string form of the corresponding value's name.

\n", "bases": "enum.Enum"}, "sqlglot.helper.seq_get": {"fullname": "sqlglot.helper.seq_get", "modulename": "sqlglot.helper", "qualname": "seq_get", "kind": "function", "doc": "

Returns the value in seq at position index, or None if index is out of bounds.

\n", "signature": "(seq: Sequence[~T], index: int) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.helper.ensure_list": {"fullname": "sqlglot.helper.ensure_list", "modulename": "sqlglot.helper", "qualname": "ensure_list", "kind": "function", "doc": "

Ensures that a value is a list, otherwise casts or wraps it into one.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.ensure_collection": {"fullname": "sqlglot.helper.ensure_collection", "modulename": "sqlglot.helper", "qualname": "ensure_collection", "kind": "function", "doc": "

Ensures that a value is a collection (excluding str and bytes), otherwise wraps it into a list.

\n\n
Arguments:
\n\n
    \n
  • value: the value of interest.
  • \n
\n\n
Returns:
\n\n
\n

The value if it's a collection, or else the value wrapped in a list.

\n
\n", "signature": "(value):", "funcdef": "def"}, "sqlglot.helper.csv": {"fullname": "sqlglot.helper.csv", "modulename": "sqlglot.helper", "qualname": "csv", "kind": "function", "doc": "

Formats any number of string arguments as CSV.

\n\n
Arguments:
\n\n
    \n
  • args: the string arguments to format.
  • \n
  • sep: the argument separator.
  • \n
\n\n
Returns:
\n\n
\n

The arguments formatted as a CSV string.

\n
\n", "signature": "(*args, sep: str = ', ') -> str:", "funcdef": "def"}, "sqlglot.helper.subclasses": {"fullname": "sqlglot.helper.subclasses", "modulename": "sqlglot.helper", "qualname": "subclasses", "kind": "function", "doc": "

Returns all subclasses for a collection of classes, possibly excluding some of them.

\n\n
Arguments:
\n\n
    \n
  • module_name: the name of the module to search for subclasses in.
  • \n
  • classes: class(es) we want to find the subclasses of.
  • \n
  • exclude: class(es) we want to exclude from the returned list.
  • \n
\n\n
Returns:
\n\n
\n

The target subclasses.

\n
\n", "signature": "(\tmodule_name: str,\tclasses: Union[Type, Tuple[Type, ...]],\texclude: Union[Type, Tuple[Type, ...]] = ()) -> List[Type]:", "funcdef": "def"}, "sqlglot.helper.apply_index_offset": {"fullname": "sqlglot.helper.apply_index_offset", "modulename": "sqlglot.helper", "qualname": "apply_index_offset", "kind": "function", "doc": "

Applies an offset to a given integer literal expression.

\n\n
Arguments:
\n\n
    \n
  • expressions: the expression the offset will be applied to, wrapped in a list.
  • \n
  • offset: the offset that will be applied.
  • \n
\n\n
Returns:
\n\n
\n

The original expression with the offset applied to it, wrapped in a list. If the provided\n expressions argument contains more than one expressions, it's returned unaffected.

\n
\n", "signature": "(expressions: List[Optional[~E]], offset: int) -> List[Optional[~E]]:", "funcdef": "def"}, "sqlglot.helper.camel_to_snake_case": {"fullname": "sqlglot.helper.camel_to_snake_case", "modulename": "sqlglot.helper", "qualname": "camel_to_snake_case", "kind": "function", "doc": "

Converts name from camelCase to snake_case and returns the result.

\n", "signature": "(name: str) -> str:", "funcdef": "def"}, "sqlglot.helper.while_changing": {"fullname": "sqlglot.helper.while_changing", "modulename": "sqlglot.helper", "qualname": "while_changing", "kind": "function", "doc": "

Applies a transformation to a given expression until a fix point is reached.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression to be transformed.
  • \n
  • func: the transformation to be applied.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: Optional[sqlglot.expressions.Expression],\tfunc: Callable[[Optional[sqlglot.expressions.Expression]], ~E]) -> ~E:", "funcdef": "def"}, "sqlglot.helper.tsort": {"fullname": "sqlglot.helper.tsort", "modulename": "sqlglot.helper", "qualname": "tsort", "kind": "function", "doc": "

Sorts a given directed acyclic graph in topological order.

\n\n
Arguments:
\n\n
    \n
  • dag: the graph to be sorted.
  • \n
\n\n
Returns:
\n\n
\n

A list that contains all of the graph's nodes in topological order.

\n
\n", "signature": "(dag: Dict[~T, List[~T]]) -> List[~T]:", "funcdef": "def"}, "sqlglot.helper.open_file": {"fullname": "sqlglot.helper.open_file", "modulename": "sqlglot.helper", "qualname": "open_file", "kind": "function", "doc": "

Open a file that may be compressed as gzip and return it in universal newline mode.

\n", "signature": "(file_name: str) -> <class 'TextIO'>:", "funcdef": "def"}, "sqlglot.helper.csv_reader": {"fullname": "sqlglot.helper.csv_reader", "modulename": "sqlglot.helper", "qualname": "csv_reader", "kind": "function", "doc": "

Returns a csv reader given the expression READ_CSV(name, ['delimiter', '|', ...]).

\n\n
Arguments:
\n\n
    \n
  • read_csv: a ReadCSV function call
  • \n
\n\n
Yields:
\n\n
\n

A python csv reader.

\n
\n", "signature": "(read_csv: sqlglot.expressions.ReadCSV) -> Any:", "funcdef": "def"}, "sqlglot.helper.find_new_name": {"fullname": "sqlglot.helper.find_new_name", "modulename": "sqlglot.helper", "qualname": "find_new_name", "kind": "function", "doc": "

Searches for a new name.

\n\n
Arguments:
\n\n
    \n
  • taken: a collection of taken names.
  • \n
  • base: base name to alter.
  • \n
\n\n
Returns:
\n\n
\n

The new, available name.

\n
\n", "signature": "(taken: Collection[str], base: str) -> str:", "funcdef": "def"}, "sqlglot.helper.object_to_dict": {"fullname": "sqlglot.helper.object_to_dict", "modulename": "sqlglot.helper", "qualname": "object_to_dict", "kind": "function", "doc": "

Returns a dictionary created from an object's attributes.

\n", "signature": "(obj: Any, **kwargs) -> Dict:", "funcdef": "def"}, "sqlglot.helper.split_num_words": {"fullname": "sqlglot.helper.split_num_words", "modulename": "sqlglot.helper", "qualname": "split_num_words", "kind": "function", "doc": "

Perform a split on a value and return N words as a result with None used for words that don't exist.

\n\n
Arguments:
\n\n
    \n
  • value: the value to be split.
  • \n
  • sep: the value to use to split on.
  • \n
  • min_num_words: the minimum number of words that are going to be in the result.
  • \n
  • fill_from_start: indicates that if None values should be inserted at the start or end of the list.
  • \n
\n\n
Examples:
\n\n
\n
\n
>>> split_num_words("db.table", ".", 3)\n[None, 'db', 'table']\n>>> split_num_words("db.table", ".", 3, fill_from_start=False)\n['db', 'table', None]\n>>> split_num_words("db.table", ".", 1)\n['db', 'table']\n
\n
\n
\n\n
Returns:
\n\n
\n

The list of words returned by split, possibly augmented by a number of None values.

\n
\n", "signature": "(\tvalue: str,\tsep: str,\tmin_num_words: int,\tfill_from_start: bool = True) -> List[Optional[str]]:", "funcdef": "def"}, "sqlglot.helper.is_iterable": {"fullname": "sqlglot.helper.is_iterable", "modulename": "sqlglot.helper", "qualname": "is_iterable", "kind": "function", "doc": "

Checks if the value is an iterable, excluding the types str and bytes.

\n\n
Examples:
\n\n
\n
\n
>>> is_iterable([1,2])\nTrue\n>>> is_iterable("test")\nFalse\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • value: the value to check if it is an iterable.
  • \n
\n\n
Returns:
\n\n
\n

A bool value indicating if it is an iterable.

\n
\n", "signature": "(value: Any) -> bool:", "funcdef": "def"}, "sqlglot.helper.flatten": {"fullname": "sqlglot.helper.flatten", "modulename": "sqlglot.helper", "qualname": "flatten", "kind": "function", "doc": "

Flattens an iterable that can contain both iterable and non-iterable elements. Objects of\ntype str and bytes are not regarded as iterables.

\n\n
Examples:
\n\n
\n
\n
>>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))\n[1, 2, 3, 4, 5, 'bla']\n>>> list(flatten([1, 2, 3]))\n[1, 2, 3]\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • values: the value to be flattened.
  • \n
\n\n
Yields:
\n\n
\n

Non-iterable elements in values.

\n
\n", "signature": "(values: Iterable[Union[Iterable[Any], Any]]) -> Iterator[Any]:", "funcdef": "def"}, "sqlglot.helper.count_params": {"fullname": "sqlglot.helper.count_params", "modulename": "sqlglot.helper", "qualname": "count_params", "kind": "function", "doc": "

Returns the number of formal parameters expected by a function, without counting \"self\"\nand \"cls\", in case of instance and class methods, respectively.

\n", "signature": "(function: Callable) -> int:", "funcdef": "def"}, "sqlglot.helper.dict_depth": {"fullname": "sqlglot.helper.dict_depth", "modulename": "sqlglot.helper", "qualname": "dict_depth", "kind": "function", "doc": "

Get the nesting depth of a dictionary.

\n\n
For example:
\n\n
\n
\n
>>> dict_depth(None)\n0\n>>> dict_depth({})\n1\n>>> dict_depth({"a": "b"})\n1\n>>> dict_depth({"a": {}})\n2\n>>> dict_depth({"a": {"b": {}}})\n3\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • d (dict): dictionary
  • \n
\n\n
Returns:
\n\n
\n

int: depth

\n
\n", "signature": "(d: Dict) -> int:", "funcdef": "def"}, "sqlglot.helper.first": {"fullname": "sqlglot.helper.first", "modulename": "sqlglot.helper", "qualname": "first", "kind": "function", "doc": "

Returns the first element from an iterable.

\n\n

Useful for sets.

\n", "signature": "(it: Iterable[~T]) -> ~T:", "funcdef": "def"}, "sqlglot.lineage": {"fullname": "sqlglot.lineage", "modulename": "sqlglot.lineage", "kind": "module", "doc": "

\n"}, "sqlglot.lineage.Node": {"fullname": "sqlglot.lineage.Node", "modulename": "sqlglot.lineage", "qualname": "Node", "kind": "class", "doc": "

\n"}, "sqlglot.lineage.Node.__init__": {"fullname": "sqlglot.lineage.Node.__init__", "modulename": "sqlglot.lineage", "qualname": "Node.__init__", "kind": "function", "doc": "

\n", "signature": "(\tname: str,\texpression: sqlglot.expressions.Expression,\tsource: sqlglot.expressions.Expression,\tdownstream: List[sqlglot.lineage.Node] = <factory>)"}, "sqlglot.lineage.Node.walk": {"fullname": "sqlglot.lineage.Node.walk", "modulename": "sqlglot.lineage", "qualname": "Node.walk", "kind": "function", "doc": "

\n", "signature": "(self) -> Iterator[sqlglot.lineage.Node]:", "funcdef": "def"}, "sqlglot.lineage.Node.to_html": {"fullname": "sqlglot.lineage.Node.to_html", "modulename": "sqlglot.lineage", "qualname": "Node.to_html", "kind": "function", "doc": "

\n", "signature": "(self, **opts) -> sqlglot.lineage.LineageHTML:", "funcdef": "def"}, "sqlglot.lineage.lineage": {"fullname": "sqlglot.lineage.lineage", "modulename": "sqlglot.lineage", "qualname": "lineage", "kind": "function", "doc": "

Build the lineage graph for a column of a SQL query.

\n\n
Arguments:
\n\n
    \n
  • column: The column to build the lineage for.
  • \n
  • sql: The SQL string or expression.
  • \n
  • schema: The schema of tables.
  • \n
  • sources: A mapping of queries which will be used to continue building lineage.
  • \n
  • rules: Optimizer rules to apply, by default only qualifying tables and columns.
  • \n
  • dialect: The dialect of input SQL.
  • \n
\n\n
Returns:
\n\n
\n

A lineage node.

\n
\n", "signature": "(\tcolumn: str | sqlglot.expressions.Column,\tsql: str | sqlglot.expressions.Expression,\tschema: Union[Dict, sqlglot.schema.Schema, NoneType] = None,\tsources: Optional[Dict[str, str | sqlglot.expressions.Subqueryable]] = None,\trules: Sequence[Callable] = (<function qualify_tables at 0x7ff75a9d9240>, <function qualify_columns at 0x7ff75a9d8820>, <function expand_laterals at 0x7ff75a9b2b90>),\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.lineage.Node:", "funcdef": "def"}, "sqlglot.lineage.LineageHTML": {"fullname": "sqlglot.lineage.LineageHTML", "modulename": "sqlglot.lineage", "qualname": "LineageHTML", "kind": "class", "doc": "

Node to HTML generator using vis.js.

\n\n

https://visjs.github.io/vis-network/docs/network/

\n"}, "sqlglot.lineage.LineageHTML.__init__": {"fullname": "sqlglot.lineage.LineageHTML.__init__", "modulename": "sqlglot.lineage", "qualname": "LineageHTML.__init__", "kind": "function", "doc": "

\n", "signature": "(\tnode: sqlglot.lineage.Node,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None,\timports: bool = True,\t**opts: Any)"}, "sqlglot.optimizer": {"fullname": "sqlglot.optimizer", "modulename": "sqlglot.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.annotate_types": {"fullname": "sqlglot.optimizer.annotate_types.annotate_types", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "annotate_types", "kind": "function", "doc": "

Recursively infer & annotate types in an expression syntax tree against a schema.\nAssumes that we've already executed the optimizer's qualify_columns step.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"y": {"cola": "SMALLINT"}}\n>>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"\n>>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)\n>>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"\n<Type.DOUBLE: 'DOUBLE'>\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): Expression to annotate.
  • \n
  • schema (dict|sqlglot.optimizer.Schema): Database schema.
  • \n
  • annotators (dict): Maps expression type to corresponding annotation function.
  • \n
  • coerces_to (dict): Maps expression type to set of types that it can be coerced into.
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: expression annotated with types

\n
\n", "signature": "(expression, schema=None, annotators=None, coerces_to=None):", "funcdef": "def"}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator", "kind": "class", "doc": "

\n"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.__init__", "kind": "function", "doc": "

\n", "signature": "(schema=None, annotators=None, coerces_to=None)"}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"fullname": "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate", "modulename": "sqlglot.optimizer.annotate_types", "qualname": "TypeAnnotator.annotate", "kind": "function", "doc": "

\n", "signature": "(self, expression):", "funcdef": "def"}, "sqlglot.optimizer.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.canonicalize.canonicalize": {"fullname": "sqlglot.optimizer.canonicalize.canonicalize", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "canonicalize", "kind": "function", "doc": "

Converts a sql expression into a standard form.

\n\n

This method relies on annotate_types because many of the\nconversions rely on type inference.

\n\n
Arguments:
\n\n
    \n
  • expression: The expression to canonicalize.
  • \n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"fullname": "sqlglot.optimizer.canonicalize.add_text_to_concat", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "add_text_to_concat", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.coerce_type": {"fullname": "sqlglot.optimizer.canonicalize.coerce_type", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "coerce_type", "kind": "function", "doc": "

\n", "signature": "(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"fullname": "sqlglot.optimizer.canonicalize.remove_redundant_casts", "modulename": "sqlglot.optimizer.canonicalize", "qualname": "remove_redundant_casts", "kind": "function", "doc": "

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"fullname": "sqlglot.optimizer.eliminate_ctes.eliminate_ctes", "modulename": "sqlglot.optimizer.eliminate_ctes", "qualname": "eliminate_ctes", "kind": "function", "doc": "

Remove unused CTEs from an expression.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_ctes(expression).sql()\n'SELECT a FROM z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"fullname": "sqlglot.optimizer.eliminate_joins.eliminate_joins", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "eliminate_joins", "kind": "function", "doc": "

Remove unused joins from an expression.

\n\n

This only removes joins when we know that the join condition doesn't produce duplicate rows.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"\n>>> expression = sqlglot.parse_one(sql)\n>>> eliminate_joins(expression).sql()\n'SELECT x.a FROM x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_joins.join_condition": {"fullname": "sqlglot.optimizer.eliminate_joins.join_condition", "modulename": "sqlglot.optimizer.eliminate_joins", "qualname": "join_condition", "kind": "function", "doc": "

Extract the join condition from a join expression.

\n\n
Arguments:
\n\n
    \n
  • join (exp.Join)
  • \n
\n\n
Returns:
\n\n
\n

tuple[list[str], list[str], exp.Expression]:\n Tuple of (source key, join key, remaining predicate)

\n
\n", "signature": "(join):", "funcdef": "def"}, "sqlglot.optimizer.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"fullname": "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries", "modulename": "sqlglot.optimizer.eliminate_subqueries", "qualname": "eliminate_subqueries", "kind": "function", "doc": "

Rewrite derived tables as CTEs, deduplicating if possible.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'\n
\n
\n
\n\n
This also deduplicates common subqueries:
\n\n
\n
\n
>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y JOIN (SELECT * FROM x) AS z")\n>>> eliminate_subqueries(expression).sql()\n'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y JOIN y AS z'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.expand_laterals": {"fullname": "sqlglot.optimizer.expand_laterals", "modulename": "sqlglot.optimizer.expand_laterals", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"fullname": "sqlglot.optimizer.expand_laterals.expand_laterals", "modulename": "sqlglot.optimizer.expand_laterals", "qualname": "expand_laterals", "kind": "function", "doc": "

Expand lateral column alias references.

\n\n

This assumes qualify_columns has already run.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT x.a + 1 AS b, b + 1 AS c FROM x"\n>>> expression = sqlglot.parse_one(sql)\n>>> expand_laterals(expression).sql()\n'SELECT x.a + 1 AS b, x.a + 1 + 1 AS c FROM x'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

optimized expression

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.expand_multi_table_selects": {"fullname": "sqlglot.optimizer.expand_multi_table_selects", "modulename": "sqlglot.optimizer.expand_multi_table_selects", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"fullname": "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects", "modulename": "sqlglot.optimizer.expand_multi_table_selects", "qualname": "expand_multi_table_selects", "kind": "function", "doc": "

Replace multiple FROM expressions with JOINs.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> expand_multi_table_selects(parse_one("SELECT * FROM x, y")).sql()\n'SELECT * FROM x CROSS JOIN y'\n
\n
\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"fullname": "sqlglot.optimizer.isolate_table_selects.isolate_table_selects", "modulename": "sqlglot.optimizer.isolate_table_selects", "qualname": "isolate_table_selects", "kind": "function", "doc": "

\n", "signature": "(expression, schema=None):", "funcdef": "def"}, "sqlglot.optimizer.lower_identities": {"fullname": "sqlglot.optimizer.lower_identities", "modulename": "sqlglot.optimizer.lower_identities", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.lower_identities.lower_identities": {"fullname": "sqlglot.optimizer.lower_identities.lower_identities", "modulename": "sqlglot.optimizer.lower_identities", "qualname": "lower_identities", "kind": "function", "doc": "

Convert all unquoted identifiers to lower case.

\n\n

Assuming the schema is all lower case, this essentially makes identifiers case-insensitive.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')\n>>> lower_identities(expression).sql()\n'SELECT bar.a AS A FROM "Foo".bar'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to quote
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: quoted expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_subqueries", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to merge derived tables into the outer query.

\n\n

This also merges CTEs if they are selected from only once.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")\n>>> merge_subqueries(expression).sql()\n'SELECT x.a FROM x JOIN y'\n
\n
\n
\n\n

If leave_tables_isolated is True, this will not merge inner queries into outer\nqueries if it would result in multiple table selects in a single query:

\n\n
\n
\n
\n

expression = sqlglot.parse_one(\"SELECT a FROM (SELECT x.a FROM x) JOIN y\")\n merge_subqueries(expression, leave_tables_isolated=True).sql()\n 'SELECT a FROM (SELECT x.a FROM x) JOIN y'

\n
\n
\n
\n\n

Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • leave_tables_isolated (bool):
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_ctes", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_ctes", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"fullname": "sqlglot.optimizer.merge_subqueries.merge_derived_tables", "modulename": "sqlglot.optimizer.merge_subqueries", "qualname": "merge_derived_tables", "kind": "function", "doc": "

\n", "signature": "(expression, leave_tables_isolated=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize": {"fullname": "sqlglot.optimizer.normalize", "modulename": "sqlglot.optimizer.normalize", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.normalize.normalize": {"fullname": "sqlglot.optimizer.normalize.normalize", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalize", "kind": "function", "doc": "

Rewrite sqlglot AST into conjunctive normal form.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(x AND y) OR z")\n>>> normalize(expression).sql()\n'(x OR z) AND (y OR z)'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to normalize
  • \n
  • dnf (bool): rewrite in disjunctive normal form instead
  • \n
  • max_distance (int): the maximal estimated distance from cnf to attempt conversion
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: normalized expression

\n
\n", "signature": "(expression, dnf=False, max_distance=128):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalized": {"fullname": "sqlglot.optimizer.normalize.normalized", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalized", "kind": "function", "doc": "

\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.normalization_distance": {"fullname": "sqlglot.optimizer.normalize.normalization_distance", "modulename": "sqlglot.optimizer.normalize", "qualname": "normalization_distance", "kind": "function", "doc": "

The difference in the number of predicates between the current expression and the normalized form.

\n\n

This is used as an estimate of the cost of the conversion which is exponential in complexity.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")\n>>> normalization_distance(expression)\n4\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to compute distance
  • \n
  • dnf (bool): compute to dnf distance instead
  • \n
\n\n
Returns:
\n\n
\n

int: difference

\n
\n", "signature": "(expression, dnf=False):", "funcdef": "def"}, "sqlglot.optimizer.normalize.distributive_law": {"fullname": "sqlglot.optimizer.normalize.distributive_law", "modulename": "sqlglot.optimizer.normalize", "qualname": "distributive_law", "kind": "function", "doc": "

x OR (y AND z) -> (x OR y) AND (x OR z)\n(x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)

\n", "signature": "(expression, dnf, max_distance):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"fullname": "sqlglot.optimizer.optimize_joins.optimize_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "optimize_joins", "kind": "function", "doc": "

Removes cross joins if possible and reorder joins based on predicate dependencies.

\n\n
Example:
\n\n
\n
\n
>>> from sqlglot import parse_one\n>>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()\n'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'\n
\n
\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"fullname": "sqlglot.optimizer.optimize_joins.reorder_joins", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "reorder_joins", "kind": "function", "doc": "

Reorder joins by topological sort order based on predicate references.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.normalize": {"fullname": "sqlglot.optimizer.optimize_joins.normalize", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "normalize", "kind": "function", "doc": "

Remove INNER and OUTER from joins as they are optional.

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.optimize_joins.other_table_names": {"fullname": "sqlglot.optimizer.optimize_joins.other_table_names", "modulename": "sqlglot.optimizer.optimize_joins", "qualname": "other_table_names", "kind": "function", "doc": "

\n", "signature": "(join, exclude):", "funcdef": "def"}, "sqlglot.optimizer.optimizer": {"fullname": "sqlglot.optimizer.optimizer", "modulename": "sqlglot.optimizer.optimizer", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.optimizer.optimize": {"fullname": "sqlglot.optimizer.optimizer.optimize", "modulename": "sqlglot.optimizer.optimizer", "qualname": "optimize", "kind": "function", "doc": "

Rewrite a sqlglot AST into an optimized form.

\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
  • schema (dict|sqlglot.optimizer.Schema): database schema.\nThis can either be an instance of sqlglot.optimizer.Schema or a mapping in one of\nthe following forms:\n 1. {table: {col: type}}\n 2. {db: {table: {col: type}}}\n 3. {catalog: {db: {table: {col: type}}}}\nIf no schema is provided then the default schema defined at sqlglot.schema will be used
  • \n
  • db (str): specify the default database, as might be set by a USE DATABASE db statement
  • \n
  • catalog (str): specify the default catalog, as might be set by a USE CATALOG c statement
  • \n
  • rules (sequence): sequence of optimizer rules to use.\nMany of the rules require tables and columns to be qualified.\nDo not remove qualify_tables or qualify_columns from the sequence of rules unless you know\nwhat you're doing!
  • \n
  • *kwargs: If a rule has a keyword argument with a same name in *kwargs, it will be passed in.
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(\texpression,\tschema=None,\tdb=None,\tcatalog=None,\trules=(<function lower_identities at 0x7ff75a9b2f80>, <function qualify_tables at 0x7ff75a9d9240>, <function isolate_table_selects at 0x7ff75a9b2e60>, <function qualify_columns at 0x7ff75a9d8820>, <function expand_laterals at 0x7ff75a9b2b90>, <function validate_qualify_columns at 0x7ff75a9d8c10>, <function pushdown_projections at 0x7ff75a9d85e0>, <function normalize at 0x7ff75a9b0820>, <function unnest_subqueries at 0x7ff75a9d9900>, <function expand_multi_table_selects at 0x7ff75a9b2dd0>, <function pushdown_predicates at 0x7ff75a9d81f0>, <function optimize_joins at 0x7ff75a9b3d00>, <function eliminate_subqueries at 0x7ff75a9b2830>, <function merge_subqueries at 0x7ff75a9b35b0>, <function eliminate_joins at 0x7ff75a9b0700>, <function eliminate_ctes at 0x7ff75a9b05e0>, <function annotate_types at 0x7ff75a989480>, <function canonicalize at 0x7ff75a9b0160>),\t**kwargs):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_predicates", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_predicates", "kind": "function", "doc": "

Rewrite sqlglot AST to pushdown predicates in FROMs and JOINs

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT * FROM (SELECT * FROM x AS x) AS y WHERE y.a = 1"\n>>> expression = sqlglot.parse_one(sql)\n>>> pushdown_predicates(expression).sql()\n'SELECT * FROM (SELECT * FROM x AS x WHERE y.a = 1) AS y WHERE TRUE'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown", "kind": "function", "doc": "

\n", "signature": "(condition, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_cnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_cnf", "kind": "function", "doc": "

If the predicates are in CNF like form, we can simply replace each block in the parent.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"fullname": "sqlglot.optimizer.pushdown_predicates.pushdown_dnf", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "pushdown_dnf", "kind": "function", "doc": "

If the predicates are in DNF form, we can only push down conditions that are in all blocks.\nAdditionally, we can't remove predicates from their original form.

\n", "signature": "(predicates, scope, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"fullname": "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "nodes_for_predicate", "kind": "function", "doc": "

\n", "signature": "(predicate, sources, scope_ref_count):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"fullname": "sqlglot.optimizer.pushdown_predicates.replace_aliases", "modulename": "sqlglot.optimizer.pushdown_predicates", "qualname": "replace_aliases", "kind": "function", "doc": "

\n", "signature": "(source, predicate):", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"fullname": "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "DEFAULT_SELECTION", "kind": "function", "doc": "

\n", "signature": "():", "funcdef": "def"}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"fullname": "sqlglot.optimizer.pushdown_projections.pushdown_projections", "modulename": "sqlglot.optimizer.pushdown_projections", "qualname": "pushdown_projections", "kind": "function", "doc": "

Rewrite sqlglot AST to remove unused columns projections.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"\n>>> expression = sqlglot.parse_one(sql)\n>>> pushdown_projections(expression).sql()\n'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to optimize
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: optimized expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "qualify_columns", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified columns.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> schema = {"tbl": {"col": "INT"}}\n>>> expression = sqlglot.parse_one("SELECT col FROM tbl")\n>>> qualify_columns(expression, schema).sql()\n'SELECT tbl.col AS col FROM tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to qualify
  • \n
  • schema (dict|sqlglot.optimizer.Schema): Database schema
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: qualified expression

\n
\n", "signature": "(expression, schema):", "funcdef": "def"}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"fullname": "sqlglot.optimizer.qualify_columns.validate_qualify_columns", "modulename": "sqlglot.optimizer.qualify_columns", "qualname": "validate_qualify_columns", "kind": "function", "doc": "

Raise an OptimizeError if any columns aren't qualified

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"fullname": "sqlglot.optimizer.qualify_tables.qualify_tables", "modulename": "sqlglot.optimizer.qualify_tables", "qualname": "qualify_tables", "kind": "function", "doc": "

Rewrite sqlglot AST to have fully qualified tables.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")\n>>> qualify_tables(expression, db="db").sql()\n'SELECT 1 FROM db.tbl AS tbl'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to qualify
  • \n
  • db (str): Database name
  • \n
  • catalog (str): Catalog name
  • \n
  • schema: A schema to populate
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: qualified expression

\n
\n", "signature": "(expression, db=None, catalog=None, schema=None):", "funcdef": "def"}, "sqlglot.optimizer.scope": {"fullname": "sqlglot.optimizer.scope", "modulename": "sqlglot.optimizer.scope", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.scope.ScopeType": {"fullname": "sqlglot.optimizer.scope.ScopeType", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "enum.Enum"}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"fullname": "sqlglot.optimizer.scope.ScopeType.ROOT", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.ROOT", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.ROOT: 1>"}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"fullname": "sqlglot.optimizer.scope.ScopeType.SUBQUERY", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.SUBQUERY", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.SUBQUERY: 2>"}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"fullname": "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.DERIVED_TABLE", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.DERIVED_TABLE: 3>"}, "sqlglot.optimizer.scope.ScopeType.CTE": {"fullname": "sqlglot.optimizer.scope.ScopeType.CTE", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.CTE", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.CTE: 4>"}, "sqlglot.optimizer.scope.ScopeType.UNION": {"fullname": "sqlglot.optimizer.scope.ScopeType.UNION", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UNION", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.UNION: 5>"}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"fullname": "sqlglot.optimizer.scope.ScopeType.UDTF", "modulename": "sqlglot.optimizer.scope", "qualname": "ScopeType.UDTF", "kind": "variable", "doc": "

\n", "default_value": " = <ScopeType.UDTF: 6>"}, "sqlglot.optimizer.scope.Scope": {"fullname": "sqlglot.optimizer.scope.Scope", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope", "kind": "class", "doc": "

Selection scope.

\n\n
Attributes:
\n\n
    \n
  • expression (exp.Select|exp.Union): Root expression of this scope
  • \n
  • sources (dict[str, exp.Table|Scope]): Mapping of source name to either\na Table expression or another Scope instance. For example:\n SELECT * FROM x {\"x\": Table(this=\"x\")}\n SELECT * FROM x AS y {\"y\": Table(this=\"x\")}\n SELECT * FROM (SELECT ...) AS y {\"y\": Scope(...)}
  • \n
  • outer_column_list (list[str]): If this is a derived table or CTE, and the outer query\ndefines a column list of its alias of this scope, this is that list of columns.\nFor example:\n SELECT * FROM (SELECT ...) AS y(col1, col2)\nThe inner query would have [\"col1\", \"col2\"] for its outer_column_list
  • \n
  • parent (Scope): Parent scope
  • \n
  • scope_type (ScopeType): Type of this scope, relative to its parent
  • \n
  • subquery_scopes (list[Scope]): List of all child scopes for subqueries
  • \n
  • cte_scopes = (list[Scope]) List of all child scopes for CTEs
  • \n
  • derived_table_scopes = (list[Scope]) List of all child scopes for derived_tables
  • \n
  • union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be\na list of the left and right child scopes.
  • \n
\n"}, "sqlglot.optimizer.scope.Scope.__init__": {"fullname": "sqlglot.optimizer.scope.Scope.__init__", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.__init__", "kind": "function", "doc": "

\n", "signature": "(\texpression,\tsources=None,\touter_column_list=None,\tparent=None,\tscope_type=<ScopeType.ROOT: 1>)"}, "sqlglot.optimizer.scope.Scope.clear_cache": {"fullname": "sqlglot.optimizer.scope.Scope.clear_cache", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.clear_cache", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.branch": {"fullname": "sqlglot.optimizer.scope.Scope.branch", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.branch", "kind": "function", "doc": "

Branch from the current scope to a new, inner scope

\n", "signature": "(self, expression, scope_type, chain_sources=None, **kwargs):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.walk": {"fullname": "sqlglot.optimizer.scope.Scope.walk", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.walk", "kind": "function", "doc": "

\n", "signature": "(self, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find": {"fullname": "sqlglot.optimizer.scope.Scope.find", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find", "kind": "function", "doc": "

Returns the first node in this scope which matches at least one of the specified types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Returns:
\n\n
\n

exp.Expression: the node which matches the criteria or None if no node matching\n the criteria was found.

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.find_all": {"fullname": "sqlglot.optimizer.scope.Scope.find_all", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.find_all", "kind": "function", "doc": "

Returns a generator object which visits all nodes in this scope and only yields those that\nmatch at least one of the specified expression types.

\n\n

This does NOT traverse into subscopes.

\n\n
Arguments:
\n\n
    \n
  • expression_types (type): the expression type(s) to match.
  • \n
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • \n
\n\n
Yields:
\n\n
\n

exp.Expression: nodes

\n
\n", "signature": "(self, *expression_types, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.replace": {"fullname": "sqlglot.optimizer.scope.Scope.replace", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.replace", "kind": "function", "doc": "

Replace old with new.

\n\n

This can be used instead of exp.Expression.replace to ensure the Scope is kept up-to-date.

\n\n
Arguments:
\n\n
    \n
  • old (exp.Expression): old node
  • \n
  • new (exp.Expression): new node
  • \n
\n", "signature": "(self, old, new):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.tables": {"fullname": "sqlglot.optimizer.scope.Scope.tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.tables", "kind": "variable", "doc": "

List of tables in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Table]: tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.ctes": {"fullname": "sqlglot.optimizer.scope.Scope.ctes", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ctes", "kind": "variable", "doc": "

List of CTEs in this scope.

\n\n
Returns:
\n\n
\n

list[exp.CTE]: ctes

\n
\n"}, "sqlglot.optimizer.scope.Scope.derived_tables": {"fullname": "sqlglot.optimizer.scope.Scope.derived_tables", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.derived_tables", "kind": "variable", "doc": "

List of derived tables in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM (SELECT ...) <- that's a derived table

\n
\n\n
Returns:
\n\n
\n

list[exp.Subquery]: derived tables

\n
\n"}, "sqlglot.optimizer.scope.Scope.subqueries": {"fullname": "sqlglot.optimizer.scope.Scope.subqueries", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.subqueries", "kind": "variable", "doc": "

List of subqueries in this scope.

\n\n
For example:
\n\n
\n

SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

\n
\n\n
Returns:
\n\n
\n

list[exp.Subqueryable]: subqueries

\n
\n"}, "sqlglot.optimizer.scope.Scope.columns": {"fullname": "sqlglot.optimizer.scope.Scope.columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.columns", "kind": "variable", "doc": "

List of columns in this scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances in this scope, plus any\n Columns that reference this scope from correlated subqueries.

\n
\n"}, "sqlglot.optimizer.scope.Scope.selected_sources": {"fullname": "sqlglot.optimizer.scope.Scope.selected_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selected_sources", "kind": "variable", "doc": "

Mapping of nodes and sources that are actually selected from in this scope.

\n\n

That is, all tables in a schema are selectable at any point. But a\ntable only becomes a selected source if it's included in a FROM or JOIN clause.

\n\n
Returns:
\n\n
\n

dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes

\n
\n"}, "sqlglot.optimizer.scope.Scope.cte_sources": {"fullname": "sqlglot.optimizer.scope.Scope.cte_sources", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.cte_sources", "kind": "variable", "doc": "

Sources that are CTEs.

\n\n
Returns:
\n\n
\n

dict[str, Scope]: Mapping of source alias to Scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.selects": {"fullname": "sqlglot.optimizer.scope.Scope.selects", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.selects", "kind": "variable", "doc": "

Select expressions of this scope.

\n\n

For example, for the following expression:\n SELECT 1 as a, 2 as b FROM x

\n\n

The outputs are the \"1 as a\" and \"2 as b\" expressions.

\n\n
Returns:
\n\n
\n

list[exp.Expression]: expressions

\n
\n"}, "sqlglot.optimizer.scope.Scope.external_columns": {"fullname": "sqlglot.optimizer.scope.Scope.external_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.external_columns", "kind": "variable", "doc": "

Columns that appear to reference sources in outer scopes.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that don't reference\n sources in the current scope.

\n
\n"}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"fullname": "sqlglot.optimizer.scope.Scope.unqualified_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.unqualified_columns", "kind": "variable", "doc": "

Unqualified columns in the current scope.

\n\n
Returns:
\n\n
\n

list[exp.Column]: Unqualified columns

\n
\n"}, "sqlglot.optimizer.scope.Scope.join_hints": {"fullname": "sqlglot.optimizer.scope.Scope.join_hints", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.join_hints", "kind": "variable", "doc": "

Hints that exist in the scope that reference tables

\n\n
Returns:
\n\n
\n

list[exp.JoinHint]: Join hints that are referenced within the scope

\n
\n"}, "sqlglot.optimizer.scope.Scope.source_columns": {"fullname": "sqlglot.optimizer.scope.Scope.source_columns", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.source_columns", "kind": "function", "doc": "

Get all columns in the current scope for a particular source.

\n\n
Arguments:
\n\n
    \n
  • source_name (str): Name of the source
  • \n
\n\n
Returns:
\n\n
\n

list[exp.Column]: Column instances that reference source_name

\n
\n", "signature": "(self, source_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.is_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_subquery", "kind": "variable", "doc": "

Determine if this scope is a subquery

\n"}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"fullname": "sqlglot.optimizer.scope.Scope.is_derived_table", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_derived_table", "kind": "variable", "doc": "

Determine if this scope is a derived table

\n"}, "sqlglot.optimizer.scope.Scope.is_union": {"fullname": "sqlglot.optimizer.scope.Scope.is_union", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_union", "kind": "variable", "doc": "

Determine if this scope is a union

\n"}, "sqlglot.optimizer.scope.Scope.is_cte": {"fullname": "sqlglot.optimizer.scope.Scope.is_cte", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_cte", "kind": "variable", "doc": "

Determine if this scope is a common table expression

\n"}, "sqlglot.optimizer.scope.Scope.is_root": {"fullname": "sqlglot.optimizer.scope.Scope.is_root", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_root", "kind": "variable", "doc": "

Determine if this is the root scope

\n"}, "sqlglot.optimizer.scope.Scope.is_udtf": {"fullname": "sqlglot.optimizer.scope.Scope.is_udtf", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_udtf", "kind": "variable", "doc": "

Determine if this scope is a UDTF (User Defined Table Function)

\n"}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"fullname": "sqlglot.optimizer.scope.Scope.is_correlated_subquery", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.is_correlated_subquery", "kind": "variable", "doc": "

Determine if this scope is a correlated subquery

\n"}, "sqlglot.optimizer.scope.Scope.rename_source": {"fullname": "sqlglot.optimizer.scope.Scope.rename_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.rename_source", "kind": "function", "doc": "

Rename a source in this scope

\n", "signature": "(self, old_name, new_name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.add_source": {"fullname": "sqlglot.optimizer.scope.Scope.add_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.add_source", "kind": "function", "doc": "

Add a source to this scope

\n", "signature": "(self, name, source):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.remove_source": {"fullname": "sqlglot.optimizer.scope.Scope.remove_source", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.remove_source", "kind": "function", "doc": "

Remove a source from this scope

\n", "signature": "(self, name):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.traverse": {"fullname": "sqlglot.optimizer.scope.Scope.traverse", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.traverse", "kind": "function", "doc": "

Traverse the scope tree from this node.

\n\n
Yields:
\n\n
\n

Scope: scope instances in depth-first-search post-order

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.Scope.ref_count": {"fullname": "sqlglot.optimizer.scope.Scope.ref_count", "modulename": "sqlglot.optimizer.scope", "qualname": "Scope.ref_count", "kind": "function", "doc": "

Count the number of times each scope in this tree is referenced.

\n\n
Returns:
\n\n
\n

dict[int, int]: Mapping of Scope instance ID to reference count

\n
\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.optimizer.scope.traverse_scope": {"fullname": "sqlglot.optimizer.scope.traverse_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "traverse_scope", "kind": "function", "doc": "

Traverse an expression by its \"scopes\".

\n\n

\"Scope\" represents the current context of a Select statement.

\n\n

This is helpful for optimizing queries, where we need more information than\nthe expression tree itself. For example, we might care about the source\nnames within a subquery. Returns a list because a generator could result in\nincomplete properties which is confusing.

\n\n
Examples:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")\n>>> scopes = traverse_scope(expression)\n>>> scopes[0].expression.sql(), list(scopes[0].sources)\n('SELECT a FROM x', ['x'])\n>>> scopes[1].expression.sql(), list(scopes[1].sources)\n('SELECT a FROM (SELECT a FROM x) AS y', ['y'])\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to traverse
  • \n
\n\n
Returns:
\n\n
\n

list[Scope]: scope instances

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.scope.build_scope": {"fullname": "sqlglot.optimizer.scope.build_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "build_scope", "kind": "function", "doc": "

Build a scope tree.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression): expression to build the scope tree for
  • \n
\n\n
Returns:
\n\n
\n

Scope: root scope

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.scope.walk_in_scope": {"fullname": "sqlglot.optimizer.scope.walk_in_scope", "modulename": "sqlglot.optimizer.scope", "qualname": "walk_in_scope", "kind": "function", "doc": "

Returns a generator object which visits all nodes in the syntax tree, stopping at\nnodes that start child scopes.

\n\n
Arguments:
\n\n
    \n
  • expression (exp.Expression):
  • \n
  • bfs (bool): if set to True the BFS traversal order will be applied,\notherwise the DFS traversal will be used instead.
  • \n
\n\n
Yields:
\n\n
\n

tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key

\n
\n", "signature": "(expression, bfs=True):", "funcdef": "def"}, "sqlglot.optimizer.simplify": {"fullname": "sqlglot.optimizer.simplify", "modulename": "sqlglot.optimizer.simplify", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.simplify.simplify": {"fullname": "sqlglot.optimizer.simplify.simplify", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify", "kind": "function", "doc": "

Rewrite sqlglot AST to simplify expressions.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("TRUE AND TRUE")\n>>> simplify(expression).sql()\n'TRUE'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to simplify
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: simplified expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.rewrite_between": {"fullname": "sqlglot.optimizer.simplify.rewrite_between", "modulename": "sqlglot.optimizer.simplify", "qualname": "rewrite_between", "kind": "function", "doc": "

Rewrite x between y and z to x >= y AND x <= z.

\n\n

This is done because comparison simplification is only done on lt/lte/gt/gte.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_not": {"fullname": "sqlglot.optimizer.simplify.simplify_not", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_not", "kind": "function", "doc": "

Demorgan's Law\nNOT (x OR y) -> NOT x AND NOT y\nNOT (x AND y) -> NOT x OR NOT y

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.flatten": {"fullname": "sqlglot.optimizer.simplify.flatten", "modulename": "sqlglot.optimizer.simplify", "qualname": "flatten", "kind": "function", "doc": "

A AND (B AND C) -> A AND B AND C\nA OR (B OR C) -> A OR B OR C

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_connectors": {"fullname": "sqlglot.optimizer.simplify.simplify_connectors", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_connectors", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_compliments": {"fullname": "sqlglot.optimizer.simplify.remove_compliments", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_compliments", "kind": "function", "doc": "

Removing complements.

\n\n

A AND NOT A -> FALSE\nA OR NOT A -> TRUE

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.uniq_sort": {"fullname": "sqlglot.optimizer.simplify.uniq_sort", "modulename": "sqlglot.optimizer.simplify", "qualname": "uniq_sort", "kind": "function", "doc": "

Uniq and sort a connector.

\n\n

C AND A AND B AND B -> A AND B AND C

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"fullname": "sqlglot.optimizer.simplify.absorb_and_eliminate", "modulename": "sqlglot.optimizer.simplify", "qualname": "absorb_and_eliminate", "kind": "function", "doc": "

absorption:\n A AND (A OR B) -> A\n A OR (A AND B) -> A\n A AND (NOT A OR B) -> A AND B\n A OR (NOT A AND B) -> A OR B\nelimination:\n (A AND B) OR (A AND NOT B) -> A\n (A OR B) AND (A OR NOT B) -> A

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_literals": {"fullname": "sqlglot.optimizer.simplify.simplify_literals", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_literals", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.simplify_parens": {"fullname": "sqlglot.optimizer.simplify.simplify_parens", "modulename": "sqlglot.optimizer.simplify", "qualname": "simplify_parens", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.remove_where_true": {"fullname": "sqlglot.optimizer.simplify.remove_where_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "remove_where_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.always_true": {"fullname": "sqlglot.optimizer.simplify.always_true", "modulename": "sqlglot.optimizer.simplify", "qualname": "always_true", "kind": "function", "doc": "

\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.simplify.is_complement": {"fullname": "sqlglot.optimizer.simplify.is_complement", "modulename": "sqlglot.optimizer.simplify", "qualname": "is_complement", "kind": "function", "doc": "

\n", "signature": "(a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.eval_boolean": {"fullname": "sqlglot.optimizer.simplify.eval_boolean", "modulename": "sqlglot.optimizer.simplify", "qualname": "eval_boolean", "kind": "function", "doc": "

\n", "signature": "(expression, a, b):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_date": {"fullname": "sqlglot.optimizer.simplify.extract_date", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_date", "kind": "function", "doc": "

\n", "signature": "(cast):", "funcdef": "def"}, "sqlglot.optimizer.simplify.extract_interval": {"fullname": "sqlglot.optimizer.simplify.extract_interval", "modulename": "sqlglot.optimizer.simplify", "qualname": "extract_interval", "kind": "function", "doc": "

\n", "signature": "(interval):", "funcdef": "def"}, "sqlglot.optimizer.simplify.date_literal": {"fullname": "sqlglot.optimizer.simplify.date_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "date_literal", "kind": "function", "doc": "

\n", "signature": "(date):", "funcdef": "def"}, "sqlglot.optimizer.simplify.boolean_literal": {"fullname": "sqlglot.optimizer.simplify.boolean_literal", "modulename": "sqlglot.optimizer.simplify", "qualname": "boolean_literal", "kind": "function", "doc": "

\n", "signature": "(condition):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "kind": "module", "doc": "

\n"}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest_subqueries", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest_subqueries", "kind": "function", "doc": "

Rewrite sqlglot AST to convert some predicates with subqueries into joins.

\n\n

Convert scalar subqueries into cross joins.\nConvert correlated or vectorized subqueries into a group by so it is not a many to many left join.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")\n>>> unnest_subqueries(expression).sql()\n'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression (sqlglot.Expression): expression to unnest
  • \n
\n\n
Returns:
\n\n
\n

sqlglot.Expression: unnested expression

\n
\n", "signature": "(expression):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.unnest": {"fullname": "sqlglot.optimizer.unnest_subqueries.unnest", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "unnest", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, sequence):", "funcdef": "def"}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"fullname": "sqlglot.optimizer.unnest_subqueries.decorrelate", "modulename": "sqlglot.optimizer.unnest_subqueries", "qualname": "decorrelate", "kind": "function", "doc": "

\n", "signature": "(select, parent_select, external_columns, sequence):", "funcdef": "def"}, "sqlglot.parser": {"fullname": "sqlglot.parser", "modulename": "sqlglot.parser", "kind": "module", "doc": "

\n"}, "sqlglot.parser.parse_var_map": {"fullname": "sqlglot.parser.parse_var_map", "modulename": "sqlglot.parser", "qualname": "parse_var_map", "kind": "function", "doc": "

\n", "signature": "(args):", "funcdef": "def"}, "sqlglot.parser.Parser": {"fullname": "sqlglot.parser.Parser", "modulename": "sqlglot.parser", "qualname": "Parser", "kind": "class", "doc": "

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces\na parsed syntax tree.

\n\n
Arguments:
\n\n
    \n
  • error_level: the desired error level.\nDefault: ErrorLevel.RAISE
  • \n
  • error_message_context: determines the amount of context to capture from a\nquery string when displaying the error message (in number of characters).\nDefault: 50.
  • \n
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.\nDefault: 0
  • \n
  • alias_post_tablesample: If the table alias comes after tablesample.\nDefault: False
  • \n
  • max_errors: Maximum number of error messages to include in a raised ParseError.\nThis is only relevant if error_level is ErrorLevel.RAISE.\nDefault: 3
  • \n
  • null_ordering: Indicates the default null ordering method to use if not explicitly set.\nOptions are \"nulls_are_small\", \"nulls_are_large\", \"nulls_are_last\".\nDefault: \"nulls_are_small\"
  • \n
\n"}, "sqlglot.parser.Parser.__init__": {"fullname": "sqlglot.parser.Parser.__init__", "modulename": "sqlglot.parser", "qualname": "Parser.__init__", "kind": "function", "doc": "

\n", "signature": "(\terror_level: Optional[sqlglot.errors.ErrorLevel] = None,\terror_message_context: int = 100,\tindex_offset: int = 0,\tunnest_column_only: bool = False,\talias_post_tablesample: bool = False,\tmax_errors: int = 3,\tnull_ordering: Optional[str] = None)"}, "sqlglot.parser.Parser.reset": {"fullname": "sqlglot.parser.Parser.reset", "modulename": "sqlglot.parser", "qualname": "Parser.reset", "kind": "function", "doc": "

\n", "signature": "(self):", "funcdef": "def"}, "sqlglot.parser.Parser.parse": {"fullname": "sqlglot.parser.Parser.parse", "modulename": "sqlglot.parser", "qualname": "Parser.parse", "kind": "function", "doc": "

Parses a list of tokens and returns a list of syntax trees, one tree\nper parsed SQL statement.

\n\n
Arguments:
\n\n
    \n
  • raw_tokens: the list of tokens.
  • \n
  • sql: the original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The list of syntax trees.

\n
\n", "signature": "(\tself,\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.parse_into": {"fullname": "sqlglot.parser.Parser.parse_into", "modulename": "sqlglot.parser", "qualname": "Parser.parse_into", "kind": "function", "doc": "

Parses a list of tokens into a given Expression type. If a collection of Expression\ntypes is given instead, this method will try to parse the token list into each one\nof them, stopping at the first for which the parsing succeeds.

\n\n
Arguments:
\n\n
    \n
  • expression_types: the expression type(s) to try and parse the token list into.
  • \n
  • raw_tokens: the list of tokens.
  • \n
  • sql: the original SQL string, used to produce helpful debug messages.
  • \n
\n\n
Returns:
\n\n
\n

The target Expression.

\n
\n", "signature": "(\tself,\texpression_types: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]],\traw_tokens: List[sqlglot.tokens.Token],\tsql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]:", "funcdef": "def"}, "sqlglot.parser.Parser.check_errors": {"fullname": "sqlglot.parser.Parser.check_errors", "modulename": "sqlglot.parser", "qualname": "Parser.check_errors", "kind": "function", "doc": "

Logs or raises any found errors, depending on the chosen error level setting.

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.raise_error": {"fullname": "sqlglot.parser.Parser.raise_error", "modulename": "sqlglot.parser", "qualname": "Parser.raise_error", "kind": "function", "doc": "

Appends an error in the list of recorded errors or raises it, depending on the chosen\nerror level setting.

\n", "signature": "(self, message: str, token: Optional[sqlglot.tokens.Token] = None) -> None:", "funcdef": "def"}, "sqlglot.parser.Parser.expression": {"fullname": "sqlglot.parser.Parser.expression", "modulename": "sqlglot.parser", "qualname": "Parser.expression", "kind": "function", "doc": "

Creates a new, validated Expression.

\n\n
Arguments:
\n\n
    \n
  • exp_class: the expression class to instantiate.
  • \n
  • comments: an optional list of comments to attach to the expression.
  • \n
  • kwargs: the arguments to set for the expression along with their respective values.
  • \n
\n\n
Returns:
\n\n
\n

The target expression.

\n
\n", "signature": "(\tself,\texp_class: Type[sqlglot.expressions.Expression],\tcomments: Optional[List[str]] = None,\t**kwargs) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.parser.Parser.validate_expression": {"fullname": "sqlglot.parser.Parser.validate_expression", "modulename": "sqlglot.parser", "qualname": "Parser.validate_expression", "kind": "function", "doc": "

Validates an already instantiated expression, making sure that all its mandatory arguments\nare set.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression to validate.
  • \n
  • args: an optional list of items that was used to instantiate the expression, if it's a Func.
  • \n
\n", "signature": "(\tself,\texpression: sqlglot.expressions.Expression,\targs: Optional[List] = None) -> None:", "funcdef": "def"}, "sqlglot.planner": {"fullname": "sqlglot.planner", "modulename": "sqlglot.planner", "kind": "module", "doc": "

\n"}, "sqlglot.planner.Plan": {"fullname": "sqlglot.planner.Plan", "modulename": "sqlglot.planner", "qualname": "Plan", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Plan.__init__": {"fullname": "sqlglot.planner.Plan.__init__", "modulename": "sqlglot.planner", "qualname": "Plan.__init__", "kind": "function", "doc": "

\n", "signature": "(expression: sqlglot.expressions.Expression)"}, "sqlglot.planner.Step": {"fullname": "sqlglot.planner.Step", "modulename": "sqlglot.planner", "qualname": "Step", "kind": "class", "doc": "

\n"}, "sqlglot.planner.Step.__init__": {"fullname": "sqlglot.planner.Step.__init__", "modulename": "sqlglot.planner", "qualname": "Step.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.planner.Step.from_expression": {"fullname": "sqlglot.planner.Step.from_expression", "modulename": "sqlglot.planner", "qualname": "Step.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\nContext:\n Aggregations:\n - SUM(x.b)\n Group:\n - x.a\nProjections:\n
      \n
    • x.a
    • \n
    • \"x\".\"\"\nDependencies:\n
        \n
      • Join: x (4347985296)\nContext:\ny:\nOn: x.a = y.a\nProjections:\nDependencies:
      • \n
    • \n
    • Scan: x (4347983136)\nContext:\n Source: x AS x\nProjections:
    • \n
    • Scan: y (4343416624)\nContext:\n Source: y AS y\nProjections:
    • \n
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Step.add_dependency": {"fullname": "sqlglot.planner.Step.add_dependency", "modulename": "sqlglot.planner", "qualname": "Step.add_dependency", "kind": "function", "doc": "

\n", "signature": "(self, dependency: sqlglot.planner.Step) -> None:", "funcdef": "def"}, "sqlglot.planner.Step.to_s": {"fullname": "sqlglot.planner.Step.to_s", "modulename": "sqlglot.planner", "qualname": "Step.to_s", "kind": "function", "doc": "

\n", "signature": "(self, level: int = 0) -> str:", "funcdef": "def"}, "sqlglot.planner.Scan": {"fullname": "sqlglot.planner.Scan", "modulename": "sqlglot.planner", "qualname": "Scan", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Scan.__init__": {"fullname": "sqlglot.planner.Scan.__init__", "modulename": "sqlglot.planner", "qualname": "Scan.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.planner.Scan.from_expression": {"fullname": "sqlglot.planner.Scan.from_expression", "modulename": "sqlglot.planner", "qualname": "Scan.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\nContext:\n Aggregations:\n - SUM(x.b)\n Group:\n - x.a\nProjections:\n
      \n
    • x.a
    • \n
    • \"x\".\"\"\nDependencies:\n
        \n
      • Join: x (4347985296)\nContext:\ny:\nOn: x.a = y.a\nProjections:\nDependencies:
      • \n
    • \n
    • Scan: x (4347983136)\nContext:\n Source: x AS x\nProjections:
    • \n
    • Scan: y (4343416624)\nContext:\n Source: y AS y\nProjections:
    • \n
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Join": {"fullname": "sqlglot.planner.Join", "modulename": "sqlglot.planner", "qualname": "Join", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Join.__init__": {"fullname": "sqlglot.planner.Join.__init__", "modulename": "sqlglot.planner", "qualname": "Join.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.planner.Join.from_joins": {"fullname": "sqlglot.planner.Join.from_joins", "modulename": "sqlglot.planner", "qualname": "Join.from_joins", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tjoins: Iterable[sqlglot.expressions.Join],\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.planner.Aggregate": {"fullname": "sqlglot.planner.Aggregate", "modulename": "sqlglot.planner", "qualname": "Aggregate", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Aggregate.__init__": {"fullname": "sqlglot.planner.Aggregate.__init__", "modulename": "sqlglot.planner", "qualname": "Aggregate.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.planner.Sort": {"fullname": "sqlglot.planner.Sort", "modulename": "sqlglot.planner", "qualname": "Sort", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.Sort.__init__": {"fullname": "sqlglot.planner.Sort.__init__", "modulename": "sqlglot.planner", "qualname": "Sort.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.planner.SetOperation": {"fullname": "sqlglot.planner.SetOperation", "modulename": "sqlglot.planner", "qualname": "SetOperation", "kind": "class", "doc": "

\n", "bases": "Step"}, "sqlglot.planner.SetOperation.__init__": {"fullname": "sqlglot.planner.SetOperation.__init__", "modulename": "sqlglot.planner", "qualname": "SetOperation.__init__", "kind": "function", "doc": "

\n", "signature": "(\top: Type[sqlglot.expressions.Expression],\tleft: str | None,\tright: str | None,\tdistinct: bool = False)"}, "sqlglot.planner.SetOperation.from_expression": {"fullname": "sqlglot.planner.SetOperation.from_expression", "modulename": "sqlglot.planner", "qualname": "SetOperation.from_expression", "kind": "function", "doc": "

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.\nNote: the expression's tables and subqueries must be aliased for this method to work. For\nexample, given the following expression:

\n\n

SELECT\n x.a,\n SUM(x.b)\nFROM x AS x\nJOIN y AS y\n ON x.a = y.a\nGROUP BY x.a

\n\n

the following DAG is produced (the expression IDs might differ per execution):

\n\n
    \n
  • Aggregate: x (4347984624)\nContext:\n Aggregations:\n - SUM(x.b)\n Group:\n - x.a\nProjections:\n
      \n
    • x.a
    • \n
    • \"x\".\"\"\nDependencies:\n
        \n
      • Join: x (4347985296)\nContext:\ny:\nOn: x.a = y.a\nProjections:\nDependencies:
      • \n
    • \n
    • Scan: x (4347983136)\nContext:\n Source: x AS x\nProjections:
    • \n
    • Scan: y (4343416624)\nContext:\n Source: y AS y\nProjections:
    • \n
  • \n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression to build the DAG from.
  • \n
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • \n
\n\n
Returns:
\n\n
\n

A Step DAG corresponding to expression.

\n
\n", "signature": "(\tcls,\texpression: sqlglot.expressions.Expression,\tctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step:", "funcdef": "def"}, "sqlglot.schema.Schema": {"fullname": "sqlglot.schema.Schema", "modulename": "sqlglot.schema", "qualname": "Schema", "kind": "class", "doc": "

Abstract base class for database schemas

\n", "bases": "abc.ABC"}, "sqlglot.schema.Schema.add_table": {"fullname": "sqlglot.schema.Schema.add_table", "modulename": "sqlglot.schema", "qualname": "Schema.add_table", "kind": "function", "doc": "

Register or update a table. Some implementing classes may require column information to also be provided.

\n\n
Arguments:
\n\n
    \n
  • table: table expression instance or string representing the table.
  • \n
  • column_mapping: a column mapping that describes the structure of the table.
  • \n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.Schema.column_names": {"fullname": "sqlglot.schema.Schema.column_names", "modulename": "sqlglot.schema", "qualname": "Schema.column_names", "kind": "function", "doc": "

Get the column names for a table.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance.
  • \n
  • only_visible: whether to include invisible columns.
  • \n
\n\n
Returns:
\n\n
\n

The list of column names.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.Schema.get_column_type": {"fullname": "sqlglot.schema.Schema.get_column_type", "modulename": "sqlglot.schema", "qualname": "Schema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

\n\n
Arguments:
\n\n
    \n
  • table: the source table.
  • \n
  • column: the target column.
  • \n
\n\n
Returns:
\n\n
\n

The resulting column type.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.Schema.supported_table_args": {"fullname": "sqlglot.schema.Schema.supported_table_args", "modulename": "sqlglot.schema", "qualname": "Schema.supported_table_args", "kind": "variable", "doc": "

Table arguments this schema supports, e.g. (\"this\", \"db\", \"catalog\")

\n", "annotation": ": Tuple[str, ...]"}, "sqlglot.schema.AbstractMappingSchema": {"fullname": "sqlglot.schema.AbstractMappingSchema", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema", "kind": "class", "doc": "

Abstract base class for generic types.

\n\n

A generic type is typically declared by inheriting from\nthis class parameterized with one or more type variables.\nFor example, a generic mapping type might be defined as::

\n\n

class Mapping(Generic[KT, VT]):\n def __getitem__(self, key: KT) -> VT:\n ...\n # Etc.

\n\n

This class can then be used as follows::

\n\n

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:\n try:\n return mapping[key]\n except KeyError:\n return default

\n", "bases": "typing.Generic[~T]"}, "sqlglot.schema.AbstractMappingSchema.__init__": {"fullname": "sqlglot.schema.AbstractMappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(mapping: dict | None = None)"}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"fullname": "sqlglot.schema.AbstractMappingSchema.table_parts", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.table_parts", "kind": "function", "doc": "

\n", "signature": "(self, table: sqlglot.expressions.Table) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.AbstractMappingSchema.find": {"fullname": "sqlglot.schema.AbstractMappingSchema.find", "modulename": "sqlglot.schema", "qualname": "AbstractMappingSchema.find", "kind": "function", "doc": "

\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table,\ttrie: Optional[Dict] = None,\traise_on_missing: bool = True) -> Optional[~T]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema": {"fullname": "sqlglot.schema.MappingSchema", "modulename": "sqlglot.schema", "qualname": "MappingSchema", "kind": "class", "doc": "

Schema based on a nested mapping.

\n\n
Arguments:
\n\n
    \n
  • schema (dict): Mapping in one of the following forms:\n
      \n
    1. {table: {col: type}}
    2. \n
    3. {db: {table: {col: type}}}
    4. \n
    5. {catalog: {db: {table: {col: type}}}}
    6. \n
    7. None - Tables will be added later
    8. \n
  • \n
  • visible (dict): Optional mapping of which columns in the schema are visible. If not provided, all columns\nare assumed to be visible. The nesting should mirror that of the schema:\n
      \n
    1. {table: set(cols)}}
    2. \n
    3. {db: {table: set(cols)}}}
    4. \n
    5. {catalog: {db: {table: set(*cols)}}}}
    6. \n
  • \n
  • dialect (str): The dialect to be used for custom type mappings.
  • \n
\n", "bases": "sqlglot.schema.AbstractMappingSchema[typing.Dict[str, str]], Schema"}, "sqlglot.schema.MappingSchema.__init__": {"fullname": "sqlglot.schema.MappingSchema.__init__", "modulename": "sqlglot.schema", "qualname": "MappingSchema.__init__", "kind": "function", "doc": "

\n", "signature": "(\tschema: Optional[Dict] = None,\tvisible: Optional[Dict] = None,\tdialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None)"}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"fullname": "sqlglot.schema.MappingSchema.from_mapping_schema", "modulename": "sqlglot.schema", "qualname": "MappingSchema.from_mapping_schema", "kind": "function", "doc": "

\n", "signature": "(\tcls,\tmapping_schema: sqlglot.schema.MappingSchema) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.copy": {"fullname": "sqlglot.schema.MappingSchema.copy", "modulename": "sqlglot.schema", "qualname": "MappingSchema.copy", "kind": "function", "doc": "

\n", "signature": "(self, **kwargs) -> sqlglot.schema.MappingSchema:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.add_table": {"fullname": "sqlglot.schema.MappingSchema.add_table", "modulename": "sqlglot.schema", "qualname": "MappingSchema.add_table", "kind": "function", "doc": "

Register or update a table. Updates are only performed if a new column mapping is provided.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance or string representing the table.
  • \n
  • column_mapping: a column mapping that describes the structure of the table.
  • \n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None) -> None:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.column_names": {"fullname": "sqlglot.schema.MappingSchema.column_names", "modulename": "sqlglot.schema", "qualname": "MappingSchema.column_names", "kind": "function", "doc": "

Get the column names for a table.

\n\n
Arguments:
\n\n
    \n
  • table: the Table expression instance.
  • \n
  • only_visible: whether to include invisible columns.
  • \n
\n\n
Returns:
\n\n
\n

The list of column names.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tonly_visible: bool = False) -> List[str]:", "funcdef": "def"}, "sqlglot.schema.MappingSchema.get_column_type": {"fullname": "sqlglot.schema.MappingSchema.get_column_type", "modulename": "sqlglot.schema", "qualname": "MappingSchema.get_column_type", "kind": "function", "doc": "

Get the sqlglot.exp.DataType type of a column in the schema.

\n\n
Arguments:
\n\n
    \n
  • table: the source table.
  • \n
  • column: the target column.
  • \n
\n\n
Returns:
\n\n
\n

The resulting column type.

\n
\n", "signature": "(\tself,\ttable: sqlglot.expressions.Table | str,\tcolumn: sqlglot.expressions.Column | str) -> sqlglot.expressions.DataType:", "funcdef": "def"}, "sqlglot.schema.ensure_schema": {"fullname": "sqlglot.schema.ensure_schema", "modulename": "sqlglot.schema", "qualname": "ensure_schema", "kind": "function", "doc": "

\n", "signature": "(schema: Any) -> sqlglot.schema.Schema:", "funcdef": "def"}, "sqlglot.schema.ensure_column_mapping": {"fullname": "sqlglot.schema.ensure_column_mapping", "modulename": "sqlglot.schema", "qualname": "ensure_column_mapping", "kind": "function", "doc": "

\n", "signature": "(\tmapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType]):", "funcdef": "def"}, "sqlglot.schema.flatten_schema": {"fullname": "sqlglot.schema.flatten_schema", "modulename": "sqlglot.schema", "qualname": "flatten_schema", "kind": "function", "doc": "

\n", "signature": "(\tschema: Dict,\tdepth: int,\tkeys: Optional[List[str]] = None) -> List[List[str]]:", "funcdef": "def"}, "sqlglot.serde": {"fullname": "sqlglot.serde", "modulename": "sqlglot.serde", "kind": "module", "doc": "

\n"}, "sqlglot.serde.dump": {"fullname": "sqlglot.serde.dump", "modulename": "sqlglot.serde", "qualname": "dump", "kind": "function", "doc": "

Recursively dump an AST into a JSON-serializable dict.

\n", "signature": "(\tnode: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]) -> Union[dict, list, str, float, int, bool]:", "funcdef": "def"}, "sqlglot.serde.load": {"fullname": "sqlglot.serde.load", "modulename": "sqlglot.serde", "qualname": "load", "kind": "function", "doc": "

Recursively load a dict (as returned by dump) into an AST.

\n", "signature": "(\tobj: Union[dict, list, str, float, int, bool]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]:", "funcdef": "def"}, "sqlglot.time": {"fullname": "sqlglot.time", "modulename": "sqlglot.time", "kind": "module", "doc": "

\n"}, "sqlglot.time.format_time": {"fullname": "sqlglot.time.format_time", "modulename": "sqlglot.time", "qualname": "format_time", "kind": "function", "doc": "

Converts a time string given a mapping.

\n\n
Examples:
\n\n
\n
\n
>>> format_time("%Y", {"%Y": "YYYY"})\n'YYYY'\n
\n
\n \n

Args:\n mapping: dictionary of time format to target time format.\n trie: optional trie, can be passed in for performance.

\n \n

Returns:\n The converted time string.

\n
\n", "signature": "(\tstring: str,\tmapping: Dict[str, str],\ttrie: Optional[Dict] = None) -> Optional[str]:", "funcdef": "def"}, "sqlglot.tokens": {"fullname": "sqlglot.tokens", "modulename": "sqlglot.tokens", "kind": "module", "doc": "

\n"}, "sqlglot.tokens.TokenType": {"fullname": "sqlglot.tokens.TokenType", "modulename": "sqlglot.tokens", "qualname": "TokenType", "kind": "class", "doc": "

An enumeration.

\n", "bases": "sqlglot.helper.AutoName"}, "sqlglot.tokens.TokenType.L_PAREN": {"fullname": "sqlglot.tokens.TokenType.L_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_PAREN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.L_PAREN: 'L_PAREN'>"}, "sqlglot.tokens.TokenType.R_PAREN": {"fullname": "sqlglot.tokens.TokenType.R_PAREN", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_PAREN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.R_PAREN: 'R_PAREN'>"}, "sqlglot.tokens.TokenType.L_BRACKET": {"fullname": "sqlglot.tokens.TokenType.L_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACKET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.L_BRACKET: 'L_BRACKET'>"}, "sqlglot.tokens.TokenType.R_BRACKET": {"fullname": "sqlglot.tokens.TokenType.R_BRACKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACKET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.R_BRACKET: 'R_BRACKET'>"}, "sqlglot.tokens.TokenType.L_BRACE": {"fullname": "sqlglot.tokens.TokenType.L_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.L_BRACE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.L_BRACE: 'L_BRACE'>"}, "sqlglot.tokens.TokenType.R_BRACE": {"fullname": "sqlglot.tokens.TokenType.R_BRACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.R_BRACE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.R_BRACE: 'R_BRACE'>"}, "sqlglot.tokens.TokenType.COMMA": {"fullname": "sqlglot.tokens.TokenType.COMMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMA", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COMMA: 'COMMA'>"}, "sqlglot.tokens.TokenType.DOT": {"fullname": "sqlglot.tokens.TokenType.DOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DOT: 'DOT'>"}, "sqlglot.tokens.TokenType.DASH": {"fullname": "sqlglot.tokens.TokenType.DASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.DASH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DASH: 'DASH'>"}, "sqlglot.tokens.TokenType.PLUS": {"fullname": "sqlglot.tokens.TokenType.PLUS", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLUS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PLUS: 'PLUS'>"}, "sqlglot.tokens.TokenType.COLON": {"fullname": "sqlglot.tokens.TokenType.COLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLON", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COLON: 'COLON'>"}, "sqlglot.tokens.TokenType.DCOLON": {"fullname": "sqlglot.tokens.TokenType.DCOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.DCOLON", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DCOLON: 'DCOLON'>"}, "sqlglot.tokens.TokenType.SEMICOLON": {"fullname": "sqlglot.tokens.TokenType.SEMICOLON", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMICOLON", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SEMICOLON: 'SEMICOLON'>"}, "sqlglot.tokens.TokenType.STAR": {"fullname": "sqlglot.tokens.TokenType.STAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.STAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.STAR: 'STAR'>"}, "sqlglot.tokens.TokenType.BACKSLASH": {"fullname": "sqlglot.tokens.TokenType.BACKSLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.BACKSLASH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BACKSLASH: 'BACKSLASH'>"}, "sqlglot.tokens.TokenType.SLASH": {"fullname": "sqlglot.tokens.TokenType.SLASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.SLASH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SLASH: 'SLASH'>"}, "sqlglot.tokens.TokenType.LT": {"fullname": "sqlglot.tokens.TokenType.LT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LT: 'LT'>"}, "sqlglot.tokens.TokenType.LTE": {"fullname": "sqlglot.tokens.TokenType.LTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LTE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LTE: 'LTE'>"}, "sqlglot.tokens.TokenType.GT": {"fullname": "sqlglot.tokens.TokenType.GT", "modulename": "sqlglot.tokens", "qualname": "TokenType.GT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GT: 'GT'>"}, "sqlglot.tokens.TokenType.GTE": {"fullname": "sqlglot.tokens.TokenType.GTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.GTE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GTE: 'GTE'>"}, "sqlglot.tokens.TokenType.NOT": {"fullname": "sqlglot.tokens.TokenType.NOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NOT: 'NOT'>"}, "sqlglot.tokens.TokenType.EQ": {"fullname": "sqlglot.tokens.TokenType.EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.EQ", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.EQ: 'EQ'>"}, "sqlglot.tokens.TokenType.NEQ": {"fullname": "sqlglot.tokens.TokenType.NEQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEQ", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NEQ: 'NEQ'>"}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"fullname": "sqlglot.tokens.TokenType.NULLSAFE_EQ", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLSAFE_EQ", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>"}, "sqlglot.tokens.TokenType.AND": {"fullname": "sqlglot.tokens.TokenType.AND", "modulename": "sqlglot.tokens", "qualname": "TokenType.AND", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.AND: 'AND'>"}, "sqlglot.tokens.TokenType.OR": {"fullname": "sqlglot.tokens.TokenType.OR", "modulename": "sqlglot.tokens", "qualname": "TokenType.OR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OR: 'OR'>"}, "sqlglot.tokens.TokenType.AMP": {"fullname": "sqlglot.tokens.TokenType.AMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.AMP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.AMP: 'AMP'>"}, "sqlglot.tokens.TokenType.DPIPE": {"fullname": "sqlglot.tokens.TokenType.DPIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DPIPE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DPIPE: 'DPIPE'>"}, "sqlglot.tokens.TokenType.PIPE": {"fullname": "sqlglot.tokens.TokenType.PIPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIPE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PIPE: 'PIPE'>"}, "sqlglot.tokens.TokenType.CARET": {"fullname": "sqlglot.tokens.TokenType.CARET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CARET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CARET: 'CARET'>"}, "sqlglot.tokens.TokenType.TILDA": {"fullname": "sqlglot.tokens.TokenType.TILDA", "modulename": "sqlglot.tokens", "qualname": "TokenType.TILDA", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TILDA: 'TILDA'>"}, "sqlglot.tokens.TokenType.ARROW": {"fullname": "sqlglot.tokens.TokenType.ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ARROW: 'ARROW'>"}, "sqlglot.tokens.TokenType.DARROW": {"fullname": "sqlglot.tokens.TokenType.DARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DARROW: 'DARROW'>"}, "sqlglot.tokens.TokenType.FARROW": {"fullname": "sqlglot.tokens.TokenType.FARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.FARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FARROW: 'FARROW'>"}, "sqlglot.tokens.TokenType.HASH": {"fullname": "sqlglot.tokens.TokenType.HASH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HASH: 'HASH'>"}, "sqlglot.tokens.TokenType.HASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.HASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.HASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HASH_ARROW: 'HASH_ARROW'>"}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"fullname": "sqlglot.tokens.TokenType.DHASH_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.DHASH_ARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DHASH_ARROW: 'DHASH_ARROW'>"}, "sqlglot.tokens.TokenType.LR_ARROW": {"fullname": "sqlglot.tokens.TokenType.LR_ARROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.LR_ARROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LR_ARROW: 'LR_ARROW'>"}, "sqlglot.tokens.TokenType.DOLLAR": {"fullname": "sqlglot.tokens.TokenType.DOLLAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOLLAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DOLLAR: 'DOLLAR'>"}, "sqlglot.tokens.TokenType.PARAMETER": {"fullname": "sqlglot.tokens.TokenType.PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARAMETER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PARAMETER: 'PARAMETER'>"}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"fullname": "sqlglot.tokens.TokenType.SESSION_PARAMETER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SESSION_PARAMETER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>"}, "sqlglot.tokens.TokenType.NATIONAL": {"fullname": "sqlglot.tokens.TokenType.NATIONAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATIONAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NATIONAL: 'NATIONAL'>"}, "sqlglot.tokens.TokenType.BLOCK_START": {"fullname": "sqlglot.tokens.TokenType.BLOCK_START", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_START", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BLOCK_START: 'BLOCK_START'>"}, "sqlglot.tokens.TokenType.BLOCK_END": {"fullname": "sqlglot.tokens.TokenType.BLOCK_END", "modulename": "sqlglot.tokens", "qualname": "TokenType.BLOCK_END", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BLOCK_END: 'BLOCK_END'>"}, "sqlglot.tokens.TokenType.SPACE": {"fullname": "sqlglot.tokens.TokenType.SPACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.SPACE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SPACE: 'SPACE'>"}, "sqlglot.tokens.TokenType.BREAK": {"fullname": "sqlglot.tokens.TokenType.BREAK", "modulename": "sqlglot.tokens", "qualname": "TokenType.BREAK", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BREAK: 'BREAK'>"}, "sqlglot.tokens.TokenType.STRING": {"fullname": "sqlglot.tokens.TokenType.STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.STRING: 'STRING'>"}, "sqlglot.tokens.TokenType.NUMBER": {"fullname": "sqlglot.tokens.TokenType.NUMBER", "modulename": "sqlglot.tokens", "qualname": "TokenType.NUMBER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NUMBER: 'NUMBER'>"}, "sqlglot.tokens.TokenType.IDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.IDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.IDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IDENTIFIER: 'IDENTIFIER'>"}, "sqlglot.tokens.TokenType.COLUMN": {"fullname": "sqlglot.tokens.TokenType.COLUMN", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COLUMN: 'COLUMN'>"}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"fullname": "sqlglot.tokens.TokenType.COLUMN_DEF", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLUMN_DEF", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COLUMN_DEF: 'COLUMN_DEF'>"}, "sqlglot.tokens.TokenType.SCHEMA": {"fullname": "sqlglot.tokens.TokenType.SCHEMA", "modulename": "sqlglot.tokens", "qualname": "TokenType.SCHEMA", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SCHEMA: 'SCHEMA'>"}, "sqlglot.tokens.TokenType.TABLE": {"fullname": "sqlglot.tokens.TokenType.TABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TABLE: 'TABLE'>"}, "sqlglot.tokens.TokenType.VAR": {"fullname": "sqlglot.tokens.TokenType.VAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VAR: 'VAR'>"}, "sqlglot.tokens.TokenType.BIT_STRING": {"fullname": "sqlglot.tokens.TokenType.BIT_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIT_STRING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BIT_STRING: 'BIT_STRING'>"}, "sqlglot.tokens.TokenType.HEX_STRING": {"fullname": "sqlglot.tokens.TokenType.HEX_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HEX_STRING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HEX_STRING: 'HEX_STRING'>"}, "sqlglot.tokens.TokenType.BYTE_STRING": {"fullname": "sqlglot.tokens.TokenType.BYTE_STRING", "modulename": "sqlglot.tokens", "qualname": "TokenType.BYTE_STRING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BYTE_STRING: 'BYTE_STRING'>"}, "sqlglot.tokens.TokenType.BOOLEAN": {"fullname": "sqlglot.tokens.TokenType.BOOLEAN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BOOLEAN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BOOLEAN: 'BOOLEAN'>"}, "sqlglot.tokens.TokenType.TINYINT": {"fullname": "sqlglot.tokens.TokenType.TINYINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TINYINT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TINYINT: 'TINYINT'>"}, "sqlglot.tokens.TokenType.SMALLINT": {"fullname": "sqlglot.tokens.TokenType.SMALLINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLINT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SMALLINT: 'SMALLINT'>"}, "sqlglot.tokens.TokenType.INT": {"fullname": "sqlglot.tokens.TokenType.INT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INT: 'INT'>"}, "sqlglot.tokens.TokenType.BIGINT": {"fullname": "sqlglot.tokens.TokenType.BIGINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGINT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BIGINT: 'BIGINT'>"}, "sqlglot.tokens.TokenType.FLOAT": {"fullname": "sqlglot.tokens.TokenType.FLOAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FLOAT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FLOAT: 'FLOAT'>"}, "sqlglot.tokens.TokenType.DOUBLE": {"fullname": "sqlglot.tokens.TokenType.DOUBLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DOUBLE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DOUBLE: 'DOUBLE'>"}, "sqlglot.tokens.TokenType.DECIMAL": {"fullname": "sqlglot.tokens.TokenType.DECIMAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.DECIMAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DECIMAL: 'DECIMAL'>"}, "sqlglot.tokens.TokenType.CHAR": {"fullname": "sqlglot.tokens.TokenType.CHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CHAR: 'CHAR'>"}, "sqlglot.tokens.TokenType.NCHAR": {"fullname": "sqlglot.tokens.TokenType.NCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NCHAR: 'NCHAR'>"}, "sqlglot.tokens.TokenType.VARCHAR": {"fullname": "sqlglot.tokens.TokenType.VARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VARCHAR: 'VARCHAR'>"}, "sqlglot.tokens.TokenType.NVARCHAR": {"fullname": "sqlglot.tokens.TokenType.NVARCHAR", "modulename": "sqlglot.tokens", "qualname": "TokenType.NVARCHAR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NVARCHAR: 'NVARCHAR'>"}, "sqlglot.tokens.TokenType.TEXT": {"fullname": "sqlglot.tokens.TokenType.TEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEXT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TEXT: 'TEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"fullname": "sqlglot.tokens.TokenType.MEDIUMTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMTEXT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>"}, "sqlglot.tokens.TokenType.LONGTEXT": {"fullname": "sqlglot.tokens.TokenType.LONGTEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGTEXT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LONGTEXT: 'LONGTEXT'>"}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"fullname": "sqlglot.tokens.TokenType.MEDIUMBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.MEDIUMBLOB", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>"}, "sqlglot.tokens.TokenType.LONGBLOB": {"fullname": "sqlglot.tokens.TokenType.LONGBLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.LONGBLOB", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LONGBLOB: 'LONGBLOB'>"}, "sqlglot.tokens.TokenType.BINARY": {"fullname": "sqlglot.tokens.TokenType.BINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.BINARY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BINARY: 'BINARY'>"}, "sqlglot.tokens.TokenType.VARBINARY": {"fullname": "sqlglot.tokens.TokenType.VARBINARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARBINARY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VARBINARY: 'VARBINARY'>"}, "sqlglot.tokens.TokenType.JSON": {"fullname": "sqlglot.tokens.TokenType.JSON", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSON", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.JSON: 'JSON'>"}, "sqlglot.tokens.TokenType.JSONB": {"fullname": "sqlglot.tokens.TokenType.JSONB", "modulename": "sqlglot.tokens", "qualname": "TokenType.JSONB", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.JSONB: 'JSONB'>"}, "sqlglot.tokens.TokenType.TIME": {"fullname": "sqlglot.tokens.TokenType.TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIME", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TIME: 'TIME'>"}, "sqlglot.tokens.TokenType.TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TIMESTAMP: 'TIMESTAMP'>"}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPTZ", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>"}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"fullname": "sqlglot.tokens.TokenType.TIMESTAMPLTZ", "modulename": "sqlglot.tokens", "qualname": "TokenType.TIMESTAMPLTZ", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>"}, "sqlglot.tokens.TokenType.DATETIME": {"fullname": "sqlglot.tokens.TokenType.DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATETIME", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DATETIME: 'DATETIME'>"}, "sqlglot.tokens.TokenType.DATE": {"fullname": "sqlglot.tokens.TokenType.DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DATE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DATE: 'DATE'>"}, "sqlglot.tokens.TokenType.UUID": {"fullname": "sqlglot.tokens.TokenType.UUID", "modulename": "sqlglot.tokens", "qualname": "TokenType.UUID", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UUID: 'UUID'>"}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"fullname": "sqlglot.tokens.TokenType.GEOGRAPHY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOGRAPHY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>"}, "sqlglot.tokens.TokenType.NULLABLE": {"fullname": "sqlglot.tokens.TokenType.NULLABLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLABLE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NULLABLE: 'NULLABLE'>"}, "sqlglot.tokens.TokenType.GEOMETRY": {"fullname": "sqlglot.tokens.TokenType.GEOMETRY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GEOMETRY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GEOMETRY: 'GEOMETRY'>"}, "sqlglot.tokens.TokenType.HLLSKETCH": {"fullname": "sqlglot.tokens.TokenType.HLLSKETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.HLLSKETCH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HLLSKETCH: 'HLLSKETCH'>"}, "sqlglot.tokens.TokenType.HSTORE": {"fullname": "sqlglot.tokens.TokenType.HSTORE", "modulename": "sqlglot.tokens", "qualname": "TokenType.HSTORE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HSTORE: 'HSTORE'>"}, "sqlglot.tokens.TokenType.SUPER": {"fullname": "sqlglot.tokens.TokenType.SUPER", "modulename": "sqlglot.tokens", "qualname": "TokenType.SUPER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SUPER: 'SUPER'>"}, "sqlglot.tokens.TokenType.SERIAL": {"fullname": "sqlglot.tokens.TokenType.SERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SERIAL: 'SERIAL'>"}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"fullname": "sqlglot.tokens.TokenType.SMALLSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLSERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>"}, "sqlglot.tokens.TokenType.BIGSERIAL": {"fullname": "sqlglot.tokens.TokenType.BIGSERIAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.BIGSERIAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BIGSERIAL: 'BIGSERIAL'>"}, "sqlglot.tokens.TokenType.XML": {"fullname": "sqlglot.tokens.TokenType.XML", "modulename": "sqlglot.tokens", "qualname": "TokenType.XML", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.XML: 'XML'>"}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"fullname": "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUEIDENTIFIER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>"}, "sqlglot.tokens.TokenType.MONEY": {"fullname": "sqlglot.tokens.TokenType.MONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.MONEY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MONEY: 'MONEY'>"}, "sqlglot.tokens.TokenType.SMALLMONEY": {"fullname": "sqlglot.tokens.TokenType.SMALLMONEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.SMALLMONEY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SMALLMONEY: 'SMALLMONEY'>"}, "sqlglot.tokens.TokenType.ROWVERSION": {"fullname": "sqlglot.tokens.TokenType.ROWVERSION", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWVERSION", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ROWVERSION: 'ROWVERSION'>"}, "sqlglot.tokens.TokenType.IMAGE": {"fullname": "sqlglot.tokens.TokenType.IMAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IMAGE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IMAGE: 'IMAGE'>"}, "sqlglot.tokens.TokenType.VARIANT": {"fullname": "sqlglot.tokens.TokenType.VARIANT", "modulename": "sqlglot.tokens", "qualname": "TokenType.VARIANT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VARIANT: 'VARIANT'>"}, "sqlglot.tokens.TokenType.OBJECT": {"fullname": "sqlglot.tokens.TokenType.OBJECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.OBJECT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OBJECT: 'OBJECT'>"}, "sqlglot.tokens.TokenType.ALIAS": {"fullname": "sqlglot.tokens.TokenType.ALIAS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALIAS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ALIAS: 'ALIAS'>"}, "sqlglot.tokens.TokenType.ALTER": {"fullname": "sqlglot.tokens.TokenType.ALTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALTER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ALTER: 'ALTER'>"}, "sqlglot.tokens.TokenType.ALWAYS": {"fullname": "sqlglot.tokens.TokenType.ALWAYS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALWAYS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ALWAYS: 'ALWAYS'>"}, "sqlglot.tokens.TokenType.ALL": {"fullname": "sqlglot.tokens.TokenType.ALL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ALL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ALL: 'ALL'>"}, "sqlglot.tokens.TokenType.ANTI": {"fullname": "sqlglot.tokens.TokenType.ANTI", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANTI", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ANTI: 'ANTI'>"}, "sqlglot.tokens.TokenType.ANY": {"fullname": "sqlglot.tokens.TokenType.ANY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ANY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ANY: 'ANY'>"}, "sqlglot.tokens.TokenType.APPLY": {"fullname": "sqlglot.tokens.TokenType.APPLY", "modulename": "sqlglot.tokens", "qualname": "TokenType.APPLY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.APPLY: 'APPLY'>"}, "sqlglot.tokens.TokenType.ARRAY": {"fullname": "sqlglot.tokens.TokenType.ARRAY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ARRAY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ARRAY: 'ARRAY'>"}, "sqlglot.tokens.TokenType.ASC": {"fullname": "sqlglot.tokens.TokenType.ASC", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASC", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ASC: 'ASC'>"}, "sqlglot.tokens.TokenType.ASOF": {"fullname": "sqlglot.tokens.TokenType.ASOF", "modulename": "sqlglot.tokens", "qualname": "TokenType.ASOF", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ASOF: 'ASOF'>"}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"fullname": "sqlglot.tokens.TokenType.AT_TIME_ZONE", "modulename": "sqlglot.tokens", "qualname": "TokenType.AT_TIME_ZONE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.AT_TIME_ZONE: 'AT_TIME_ZONE'>"}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"fullname": "sqlglot.tokens.TokenType.AUTO_INCREMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.AUTO_INCREMENT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>"}, "sqlglot.tokens.TokenType.BEGIN": {"fullname": "sqlglot.tokens.TokenType.BEGIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BEGIN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BEGIN: 'BEGIN'>"}, "sqlglot.tokens.TokenType.BETWEEN": {"fullname": "sqlglot.tokens.TokenType.BETWEEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.BETWEEN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BETWEEN: 'BETWEEN'>"}, "sqlglot.tokens.TokenType.BOTH": {"fullname": "sqlglot.tokens.TokenType.BOTH", "modulename": "sqlglot.tokens", "qualname": "TokenType.BOTH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BOTH: 'BOTH'>"}, "sqlglot.tokens.TokenType.BUCKET": {"fullname": "sqlglot.tokens.TokenType.BUCKET", "modulename": "sqlglot.tokens", "qualname": "TokenType.BUCKET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BUCKET: 'BUCKET'>"}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"fullname": "sqlglot.tokens.TokenType.BY_DEFAULT", "modulename": "sqlglot.tokens", "qualname": "TokenType.BY_DEFAULT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.BY_DEFAULT: 'BY_DEFAULT'>"}, "sqlglot.tokens.TokenType.CACHE": {"fullname": "sqlglot.tokens.TokenType.CACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CACHE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CACHE: 'CACHE'>"}, "sqlglot.tokens.TokenType.CASCADE": {"fullname": "sqlglot.tokens.TokenType.CASCADE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CASCADE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CASCADE: 'CASCADE'>"}, "sqlglot.tokens.TokenType.CASE": {"fullname": "sqlglot.tokens.TokenType.CASE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CASE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CASE: 'CASE'>"}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"fullname": "sqlglot.tokens.TokenType.CHARACTER_SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHARACTER_SET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>"}, "sqlglot.tokens.TokenType.CHECK": {"fullname": "sqlglot.tokens.TokenType.CHECK", "modulename": "sqlglot.tokens", "qualname": "TokenType.CHECK", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CHECK: 'CHECK'>"}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"fullname": "sqlglot.tokens.TokenType.CLUSTER_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.CLUSTER_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CLUSTER_BY: 'CLUSTER_BY'>"}, "sqlglot.tokens.TokenType.COLLATE": {"fullname": "sqlglot.tokens.TokenType.COLLATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.COLLATE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COLLATE: 'COLLATE'>"}, "sqlglot.tokens.TokenType.COMMAND": {"fullname": "sqlglot.tokens.TokenType.COMMAND", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMAND", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COMMAND: 'COMMAND'>"}, "sqlglot.tokens.TokenType.COMMENT": {"fullname": "sqlglot.tokens.TokenType.COMMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMENT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COMMENT: 'COMMENT'>"}, "sqlglot.tokens.TokenType.COMMIT": {"fullname": "sqlglot.tokens.TokenType.COMMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMMIT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COMMIT: 'COMMIT'>"}, "sqlglot.tokens.TokenType.COMPOUND": {"fullname": "sqlglot.tokens.TokenType.COMPOUND", "modulename": "sqlglot.tokens", "qualname": "TokenType.COMPOUND", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.COMPOUND: 'COMPOUND'>"}, "sqlglot.tokens.TokenType.CONSTRAINT": {"fullname": "sqlglot.tokens.TokenType.CONSTRAINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.CONSTRAINT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CONSTRAINT: 'CONSTRAINT'>"}, "sqlglot.tokens.TokenType.CREATE": {"fullname": "sqlglot.tokens.TokenType.CREATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CREATE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CREATE: 'CREATE'>"}, "sqlglot.tokens.TokenType.CROSS": {"fullname": "sqlglot.tokens.TokenType.CROSS", "modulename": "sqlglot.tokens", "qualname": "TokenType.CROSS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CROSS: 'CROSS'>"}, "sqlglot.tokens.TokenType.CUBE": {"fullname": "sqlglot.tokens.TokenType.CUBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CUBE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CUBE: 'CUBE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>"}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_DATETIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_DATETIME", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>"}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"fullname": "sqlglot.tokens.TokenType.CURRENT_ROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_ROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CURRENT_ROW: 'CURRENT_ROW'>"}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIME", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIME", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>"}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"fullname": "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP", "modulename": "sqlglot.tokens", "qualname": "TokenType.CURRENT_TIMESTAMP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>"}, "sqlglot.tokens.TokenType.DEFAULT": {"fullname": "sqlglot.tokens.TokenType.DEFAULT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DEFAULT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DEFAULT: 'DEFAULT'>"}, "sqlglot.tokens.TokenType.DELETE": {"fullname": "sqlglot.tokens.TokenType.DELETE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DELETE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DELETE: 'DELETE'>"}, "sqlglot.tokens.TokenType.DESC": {"fullname": "sqlglot.tokens.TokenType.DESC", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESC", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DESC: 'DESC'>"}, "sqlglot.tokens.TokenType.DESCRIBE": {"fullname": "sqlglot.tokens.TokenType.DESCRIBE", "modulename": "sqlglot.tokens", "qualname": "TokenType.DESCRIBE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DESCRIBE: 'DESCRIBE'>"}, "sqlglot.tokens.TokenType.DISTINCT": {"fullname": "sqlglot.tokens.TokenType.DISTINCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.DISTINCT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DISTINCT: 'DISTINCT'>"}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"fullname": "sqlglot.tokens.TokenType.DISTINCT_FROM", "modulename": "sqlglot.tokens", "qualname": "TokenType.DISTINCT_FROM", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DISTINCT_FROM: 'DISTINCT_FROM'>"}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"fullname": "sqlglot.tokens.TokenType.DISTRIBUTE_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.DISTRIBUTE_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>"}, "sqlglot.tokens.TokenType.DIV": {"fullname": "sqlglot.tokens.TokenType.DIV", "modulename": "sqlglot.tokens", "qualname": "TokenType.DIV", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DIV: 'DIV'>"}, "sqlglot.tokens.TokenType.DROP": {"fullname": "sqlglot.tokens.TokenType.DROP", "modulename": "sqlglot.tokens", "qualname": "TokenType.DROP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.DROP: 'DROP'>"}, "sqlglot.tokens.TokenType.ELSE": {"fullname": "sqlglot.tokens.TokenType.ELSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ELSE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ELSE: 'ELSE'>"}, "sqlglot.tokens.TokenType.ENCODE": {"fullname": "sqlglot.tokens.TokenType.ENCODE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ENCODE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ENCODE: 'ENCODE'>"}, "sqlglot.tokens.TokenType.END": {"fullname": "sqlglot.tokens.TokenType.END", "modulename": "sqlglot.tokens", "qualname": "TokenType.END", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.END: 'END'>"}, "sqlglot.tokens.TokenType.ESCAPE": {"fullname": "sqlglot.tokens.TokenType.ESCAPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ESCAPE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ESCAPE: 'ESCAPE'>"}, "sqlglot.tokens.TokenType.EXCEPT": {"fullname": "sqlglot.tokens.TokenType.EXCEPT", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXCEPT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.EXCEPT: 'EXCEPT'>"}, "sqlglot.tokens.TokenType.EXECUTE": {"fullname": "sqlglot.tokens.TokenType.EXECUTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXECUTE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.EXECUTE: 'EXECUTE'>"}, "sqlglot.tokens.TokenType.EXISTS": {"fullname": "sqlglot.tokens.TokenType.EXISTS", "modulename": "sqlglot.tokens", "qualname": "TokenType.EXISTS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.EXISTS: 'EXISTS'>"}, "sqlglot.tokens.TokenType.FALSE": {"fullname": "sqlglot.tokens.TokenType.FALSE", "modulename": "sqlglot.tokens", "qualname": "TokenType.FALSE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FALSE: 'FALSE'>"}, "sqlglot.tokens.TokenType.FETCH": {"fullname": "sqlglot.tokens.TokenType.FETCH", "modulename": "sqlglot.tokens", "qualname": "TokenType.FETCH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FETCH: 'FETCH'>"}, "sqlglot.tokens.TokenType.FILTER": {"fullname": "sqlglot.tokens.TokenType.FILTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.FILTER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FILTER: 'FILTER'>"}, "sqlglot.tokens.TokenType.FINAL": {"fullname": "sqlglot.tokens.TokenType.FINAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FINAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FINAL: 'FINAL'>"}, "sqlglot.tokens.TokenType.FIRST": {"fullname": "sqlglot.tokens.TokenType.FIRST", "modulename": "sqlglot.tokens", "qualname": "TokenType.FIRST", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FIRST: 'FIRST'>"}, "sqlglot.tokens.TokenType.FOLLOWING": {"fullname": "sqlglot.tokens.TokenType.FOLLOWING", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOLLOWING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FOLLOWING: 'FOLLOWING'>"}, "sqlglot.tokens.TokenType.FOR": {"fullname": "sqlglot.tokens.TokenType.FOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FOR: 'FOR'>"}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"fullname": "sqlglot.tokens.TokenType.FOREIGN_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.FOREIGN_KEY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>"}, "sqlglot.tokens.TokenType.FORMAT": {"fullname": "sqlglot.tokens.TokenType.FORMAT", "modulename": "sqlglot.tokens", "qualname": "TokenType.FORMAT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FORMAT: 'FORMAT'>"}, "sqlglot.tokens.TokenType.FROM": {"fullname": "sqlglot.tokens.TokenType.FROM", "modulename": "sqlglot.tokens", "qualname": "TokenType.FROM", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FROM: 'FROM'>"}, "sqlglot.tokens.TokenType.FULL": {"fullname": "sqlglot.tokens.TokenType.FULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.FULL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FULL: 'FULL'>"}, "sqlglot.tokens.TokenType.FUNCTION": {"fullname": "sqlglot.tokens.TokenType.FUNCTION", "modulename": "sqlglot.tokens", "qualname": "TokenType.FUNCTION", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.FUNCTION: 'FUNCTION'>"}, "sqlglot.tokens.TokenType.GENERATED": {"fullname": "sqlglot.tokens.TokenType.GENERATED", "modulename": "sqlglot.tokens", "qualname": "TokenType.GENERATED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GENERATED: 'GENERATED'>"}, "sqlglot.tokens.TokenType.GLOB": {"fullname": "sqlglot.tokens.TokenType.GLOB", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOB", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GLOB: 'GLOB'>"}, "sqlglot.tokens.TokenType.GLOBAL": {"fullname": "sqlglot.tokens.TokenType.GLOBAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.GLOBAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GLOBAL: 'GLOBAL'>"}, "sqlglot.tokens.TokenType.GROUP_BY": {"fullname": "sqlglot.tokens.TokenType.GROUP_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUP_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GROUP_BY: 'GROUP_BY'>"}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"fullname": "sqlglot.tokens.TokenType.GROUPING_SETS", "modulename": "sqlglot.tokens", "qualname": "TokenType.GROUPING_SETS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>"}, "sqlglot.tokens.TokenType.HAVING": {"fullname": "sqlglot.tokens.TokenType.HAVING", "modulename": "sqlglot.tokens", "qualname": "TokenType.HAVING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HAVING: 'HAVING'>"}, "sqlglot.tokens.TokenType.HINT": {"fullname": "sqlglot.tokens.TokenType.HINT", "modulename": "sqlglot.tokens", "qualname": "TokenType.HINT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.HINT: 'HINT'>"}, "sqlglot.tokens.TokenType.IDENTITY": {"fullname": "sqlglot.tokens.TokenType.IDENTITY", "modulename": "sqlglot.tokens", "qualname": "TokenType.IDENTITY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IDENTITY: 'IDENTITY'>"}, "sqlglot.tokens.TokenType.IF": {"fullname": "sqlglot.tokens.TokenType.IF", "modulename": "sqlglot.tokens", "qualname": "TokenType.IF", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IF: 'IF'>"}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"fullname": "sqlglot.tokens.TokenType.IGNORE_NULLS", "modulename": "sqlglot.tokens", "qualname": "TokenType.IGNORE_NULLS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IGNORE_NULLS: 'IGNORE_NULLS'>"}, "sqlglot.tokens.TokenType.ILIKE": {"fullname": "sqlglot.tokens.TokenType.ILIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.ILIKE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ILIKE: 'ILIKE'>"}, "sqlglot.tokens.TokenType.IN": {"fullname": "sqlglot.tokens.TokenType.IN", "modulename": "sqlglot.tokens", "qualname": "TokenType.IN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IN: 'IN'>"}, "sqlglot.tokens.TokenType.INDEX": {"fullname": "sqlglot.tokens.TokenType.INDEX", "modulename": "sqlglot.tokens", "qualname": "TokenType.INDEX", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INDEX: 'INDEX'>"}, "sqlglot.tokens.TokenType.INNER": {"fullname": "sqlglot.tokens.TokenType.INNER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INNER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INNER: 'INNER'>"}, "sqlglot.tokens.TokenType.INSERT": {"fullname": "sqlglot.tokens.TokenType.INSERT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INSERT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INSERT: 'INSERT'>"}, "sqlglot.tokens.TokenType.INTERSECT": {"fullname": "sqlglot.tokens.TokenType.INTERSECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERSECT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INTERSECT: 'INTERSECT'>"}, "sqlglot.tokens.TokenType.INTERVAL": {"fullname": "sqlglot.tokens.TokenType.INTERVAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTERVAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INTERVAL: 'INTERVAL'>"}, "sqlglot.tokens.TokenType.INTO": {"fullname": "sqlglot.tokens.TokenType.INTO", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTO", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INTO: 'INTO'>"}, "sqlglot.tokens.TokenType.INTRODUCER": {"fullname": "sqlglot.tokens.TokenType.INTRODUCER", "modulename": "sqlglot.tokens", "qualname": "TokenType.INTRODUCER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.INTRODUCER: 'INTRODUCER'>"}, "sqlglot.tokens.TokenType.IRLIKE": {"fullname": "sqlglot.tokens.TokenType.IRLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.IRLIKE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IRLIKE: 'IRLIKE'>"}, "sqlglot.tokens.TokenType.IS": {"fullname": "sqlglot.tokens.TokenType.IS", "modulename": "sqlglot.tokens", "qualname": "TokenType.IS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.IS: 'IS'>"}, "sqlglot.tokens.TokenType.ISNULL": {"fullname": "sqlglot.tokens.TokenType.ISNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.ISNULL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ISNULL: 'ISNULL'>"}, "sqlglot.tokens.TokenType.JOIN": {"fullname": "sqlglot.tokens.TokenType.JOIN", "modulename": "sqlglot.tokens", "qualname": "TokenType.JOIN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.JOIN: 'JOIN'>"}, "sqlglot.tokens.TokenType.LANGUAGE": {"fullname": "sqlglot.tokens.TokenType.LANGUAGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LANGUAGE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LANGUAGE: 'LANGUAGE'>"}, "sqlglot.tokens.TokenType.LATERAL": {"fullname": "sqlglot.tokens.TokenType.LATERAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.LATERAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LATERAL: 'LATERAL'>"}, "sqlglot.tokens.TokenType.LAZY": {"fullname": "sqlglot.tokens.TokenType.LAZY", "modulename": "sqlglot.tokens", "qualname": "TokenType.LAZY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LAZY: 'LAZY'>"}, "sqlglot.tokens.TokenType.LEADING": {"fullname": "sqlglot.tokens.TokenType.LEADING", "modulename": "sqlglot.tokens", "qualname": "TokenType.LEADING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LEADING: 'LEADING'>"}, "sqlglot.tokens.TokenType.LEFT": {"fullname": "sqlglot.tokens.TokenType.LEFT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LEFT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LEFT: 'LEFT'>"}, "sqlglot.tokens.TokenType.LIKE": {"fullname": "sqlglot.tokens.TokenType.LIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIKE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LIKE: 'LIKE'>"}, "sqlglot.tokens.TokenType.LIMIT": {"fullname": "sqlglot.tokens.TokenType.LIMIT", "modulename": "sqlglot.tokens", "qualname": "TokenType.LIMIT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LIMIT: 'LIMIT'>"}, "sqlglot.tokens.TokenType.LOAD_DATA": {"fullname": "sqlglot.tokens.TokenType.LOAD_DATA", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOAD_DATA", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LOAD_DATA: 'LOAD_DATA'>"}, "sqlglot.tokens.TokenType.LOCAL": {"fullname": "sqlglot.tokens.TokenType.LOCAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.LOCAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.LOCAL: 'LOCAL'>"}, "sqlglot.tokens.TokenType.MAP": {"fullname": "sqlglot.tokens.TokenType.MAP", "modulename": "sqlglot.tokens", "qualname": "TokenType.MAP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MAP: 'MAP'>"}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"fullname": "sqlglot.tokens.TokenType.MATCH_RECOGNIZE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MATCH_RECOGNIZE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>"}, "sqlglot.tokens.TokenType.MATERIALIZED": {"fullname": "sqlglot.tokens.TokenType.MATERIALIZED", "modulename": "sqlglot.tokens", "qualname": "TokenType.MATERIALIZED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MATERIALIZED: 'MATERIALIZED'>"}, "sqlglot.tokens.TokenType.MERGE": {"fullname": "sqlglot.tokens.TokenType.MERGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.MERGE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MERGE: 'MERGE'>"}, "sqlglot.tokens.TokenType.MOD": {"fullname": "sqlglot.tokens.TokenType.MOD", "modulename": "sqlglot.tokens", "qualname": "TokenType.MOD", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.MOD: 'MOD'>"}, "sqlglot.tokens.TokenType.NATURAL": {"fullname": "sqlglot.tokens.TokenType.NATURAL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NATURAL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NATURAL: 'NATURAL'>"}, "sqlglot.tokens.TokenType.NEXT": {"fullname": "sqlglot.tokens.TokenType.NEXT", "modulename": "sqlglot.tokens", "qualname": "TokenType.NEXT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NEXT: 'NEXT'>"}, "sqlglot.tokens.TokenType.NO_ACTION": {"fullname": "sqlglot.tokens.TokenType.NO_ACTION", "modulename": "sqlglot.tokens", "qualname": "TokenType.NO_ACTION", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NO_ACTION: 'NO_ACTION'>"}, "sqlglot.tokens.TokenType.NOTNULL": {"fullname": "sqlglot.tokens.TokenType.NOTNULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NOTNULL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NOTNULL: 'NOTNULL'>"}, "sqlglot.tokens.TokenType.NULL": {"fullname": "sqlglot.tokens.TokenType.NULL", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULL", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NULL: 'NULL'>"}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"fullname": "sqlglot.tokens.TokenType.NULLS_FIRST", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLS_FIRST", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NULLS_FIRST: 'NULLS_FIRST'>"}, "sqlglot.tokens.TokenType.NULLS_LAST": {"fullname": "sqlglot.tokens.TokenType.NULLS_LAST", "modulename": "sqlglot.tokens", "qualname": "TokenType.NULLS_LAST", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.NULLS_LAST: 'NULLS_LAST'>"}, "sqlglot.tokens.TokenType.OFFSET": {"fullname": "sqlglot.tokens.TokenType.OFFSET", "modulename": "sqlglot.tokens", "qualname": "TokenType.OFFSET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OFFSET: 'OFFSET'>"}, "sqlglot.tokens.TokenType.ON": {"fullname": "sqlglot.tokens.TokenType.ON", "modulename": "sqlglot.tokens", "qualname": "TokenType.ON", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ON: 'ON'>"}, "sqlglot.tokens.TokenType.ONLY": {"fullname": "sqlglot.tokens.TokenType.ONLY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ONLY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ONLY: 'ONLY'>"}, "sqlglot.tokens.TokenType.OPTIONS": {"fullname": "sqlglot.tokens.TokenType.OPTIONS", "modulename": "sqlglot.tokens", "qualname": "TokenType.OPTIONS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OPTIONS: 'OPTIONS'>"}, "sqlglot.tokens.TokenType.ORDER_BY": {"fullname": "sqlglot.tokens.TokenType.ORDER_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDER_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ORDER_BY: 'ORDER_BY'>"}, "sqlglot.tokens.TokenType.ORDERED": {"fullname": "sqlglot.tokens.TokenType.ORDERED", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDERED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ORDERED: 'ORDERED'>"}, "sqlglot.tokens.TokenType.ORDINALITY": {"fullname": "sqlglot.tokens.TokenType.ORDINALITY", "modulename": "sqlglot.tokens", "qualname": "TokenType.ORDINALITY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ORDINALITY: 'ORDINALITY'>"}, "sqlglot.tokens.TokenType.OUTER": {"fullname": "sqlglot.tokens.TokenType.OUTER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OUTER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OUTER: 'OUTER'>"}, "sqlglot.tokens.TokenType.OUT_OF": {"fullname": "sqlglot.tokens.TokenType.OUT_OF", "modulename": "sqlglot.tokens", "qualname": "TokenType.OUT_OF", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OUT_OF: 'OUT_OF'>"}, "sqlglot.tokens.TokenType.OVER": {"fullname": "sqlglot.tokens.TokenType.OVER", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OVER: 'OVER'>"}, "sqlglot.tokens.TokenType.OVERWRITE": {"fullname": "sqlglot.tokens.TokenType.OVERWRITE", "modulename": "sqlglot.tokens", "qualname": "TokenType.OVERWRITE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.OVERWRITE: 'OVERWRITE'>"}, "sqlglot.tokens.TokenType.PARTITION": {"fullname": "sqlglot.tokens.TokenType.PARTITION", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PARTITION: 'PARTITION'>"}, "sqlglot.tokens.TokenType.PARTITION_BY": {"fullname": "sqlglot.tokens.TokenType.PARTITION_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PARTITION_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PARTITION_BY: 'PARTITION_BY'>"}, "sqlglot.tokens.TokenType.PERCENT": {"fullname": "sqlglot.tokens.TokenType.PERCENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PERCENT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PERCENT: 'PERCENT'>"}, "sqlglot.tokens.TokenType.PIVOT": {"fullname": "sqlglot.tokens.TokenType.PIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.PIVOT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PIVOT: 'PIVOT'>"}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"fullname": "sqlglot.tokens.TokenType.PLACEHOLDER", "modulename": "sqlglot.tokens", "qualname": "TokenType.PLACEHOLDER", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>"}, "sqlglot.tokens.TokenType.PRECEDING": {"fullname": "sqlglot.tokens.TokenType.PRECEDING", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRECEDING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PRECEDING: 'PRECEDING'>"}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"fullname": "sqlglot.tokens.TokenType.PRIMARY_KEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.PRIMARY_KEY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>"}, "sqlglot.tokens.TokenType.PROCEDURE": {"fullname": "sqlglot.tokens.TokenType.PROCEDURE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROCEDURE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PROCEDURE: 'PROCEDURE'>"}, "sqlglot.tokens.TokenType.PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PROPERTIES: 'PROPERTIES'>"}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"fullname": "sqlglot.tokens.TokenType.PSEUDO_TYPE", "modulename": "sqlglot.tokens", "qualname": "TokenType.PSEUDO_TYPE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>"}, "sqlglot.tokens.TokenType.QUALIFY": {"fullname": "sqlglot.tokens.TokenType.QUALIFY", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUALIFY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.QUALIFY: 'QUALIFY'>"}, "sqlglot.tokens.TokenType.QUOTE": {"fullname": "sqlglot.tokens.TokenType.QUOTE", "modulename": "sqlglot.tokens", "qualname": "TokenType.QUOTE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.QUOTE: 'QUOTE'>"}, "sqlglot.tokens.TokenType.RANGE": {"fullname": "sqlglot.tokens.TokenType.RANGE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RANGE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.RANGE: 'RANGE'>"}, "sqlglot.tokens.TokenType.RECURSIVE": {"fullname": "sqlglot.tokens.TokenType.RECURSIVE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RECURSIVE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.RECURSIVE: 'RECURSIVE'>"}, "sqlglot.tokens.TokenType.REPLACE": {"fullname": "sqlglot.tokens.TokenType.REPLACE", "modulename": "sqlglot.tokens", "qualname": "TokenType.REPLACE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.REPLACE: 'REPLACE'>"}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"fullname": "sqlglot.tokens.TokenType.RESPECT_NULLS", "modulename": "sqlglot.tokens", "qualname": "TokenType.RESPECT_NULLS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.RESPECT_NULLS: 'RESPECT_NULLS'>"}, "sqlglot.tokens.TokenType.REFERENCES": {"fullname": "sqlglot.tokens.TokenType.REFERENCES", "modulename": "sqlglot.tokens", "qualname": "TokenType.REFERENCES", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.REFERENCES: 'REFERENCES'>"}, "sqlglot.tokens.TokenType.RIGHT": {"fullname": "sqlglot.tokens.TokenType.RIGHT", "modulename": "sqlglot.tokens", "qualname": "TokenType.RIGHT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.RIGHT: 'RIGHT'>"}, "sqlglot.tokens.TokenType.RLIKE": {"fullname": "sqlglot.tokens.TokenType.RLIKE", "modulename": "sqlglot.tokens", "qualname": "TokenType.RLIKE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.RLIKE: 'RLIKE'>"}, "sqlglot.tokens.TokenType.ROLLBACK": {"fullname": "sqlglot.tokens.TokenType.ROLLBACK", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLBACK", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ROLLBACK: 'ROLLBACK'>"}, "sqlglot.tokens.TokenType.ROLLUP": {"fullname": "sqlglot.tokens.TokenType.ROLLUP", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROLLUP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ROLLUP: 'ROLLUP'>"}, "sqlglot.tokens.TokenType.ROW": {"fullname": "sqlglot.tokens.TokenType.ROW", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ROW: 'ROW'>"}, "sqlglot.tokens.TokenType.ROWS": {"fullname": "sqlglot.tokens.TokenType.ROWS", "modulename": "sqlglot.tokens", "qualname": "TokenType.ROWS", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.ROWS: 'ROWS'>"}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"fullname": "sqlglot.tokens.TokenType.SCHEMA_COMMENT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SCHEMA_COMMENT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SCHEMA_COMMENT: 'SCHEMA_COMMENT'>"}, "sqlglot.tokens.TokenType.SEED": {"fullname": "sqlglot.tokens.TokenType.SEED", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SEED: 'SEED'>"}, "sqlglot.tokens.TokenType.SELECT": {"fullname": "sqlglot.tokens.TokenType.SELECT", "modulename": "sqlglot.tokens", "qualname": "TokenType.SELECT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SELECT: 'SELECT'>"}, "sqlglot.tokens.TokenType.SEMI": {"fullname": "sqlglot.tokens.TokenType.SEMI", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEMI", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SEMI: 'SEMI'>"}, "sqlglot.tokens.TokenType.SEPARATOR": {"fullname": "sqlglot.tokens.TokenType.SEPARATOR", "modulename": "sqlglot.tokens", "qualname": "TokenType.SEPARATOR", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SEPARATOR: 'SEPARATOR'>"}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"fullname": "sqlglot.tokens.TokenType.SERDE_PROPERTIES", "modulename": "sqlglot.tokens", "qualname": "TokenType.SERDE_PROPERTIES", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>"}, "sqlglot.tokens.TokenType.SET": {"fullname": "sqlglot.tokens.TokenType.SET", "modulename": "sqlglot.tokens", "qualname": "TokenType.SET", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SET: 'SET'>"}, "sqlglot.tokens.TokenType.SHOW": {"fullname": "sqlglot.tokens.TokenType.SHOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.SHOW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SHOW: 'SHOW'>"}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"fullname": "sqlglot.tokens.TokenType.SIMILAR_TO", "modulename": "sqlglot.tokens", "qualname": "TokenType.SIMILAR_TO", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>"}, "sqlglot.tokens.TokenType.SOME": {"fullname": "sqlglot.tokens.TokenType.SOME", "modulename": "sqlglot.tokens", "qualname": "TokenType.SOME", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SOME: 'SOME'>"}, "sqlglot.tokens.TokenType.SORTKEY": {"fullname": "sqlglot.tokens.TokenType.SORTKEY", "modulename": "sqlglot.tokens", "qualname": "TokenType.SORTKEY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SORTKEY: 'SORTKEY'>"}, "sqlglot.tokens.TokenType.SORT_BY": {"fullname": "sqlglot.tokens.TokenType.SORT_BY", "modulename": "sqlglot.tokens", "qualname": "TokenType.SORT_BY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.SORT_BY: 'SORT_BY'>"}, "sqlglot.tokens.TokenType.STRUCT": {"fullname": "sqlglot.tokens.TokenType.STRUCT", "modulename": "sqlglot.tokens", "qualname": "TokenType.STRUCT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.STRUCT: 'STRUCT'>"}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"fullname": "sqlglot.tokens.TokenType.TABLE_SAMPLE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TABLE_SAMPLE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>"}, "sqlglot.tokens.TokenType.TEMPORARY": {"fullname": "sqlglot.tokens.TokenType.TEMPORARY", "modulename": "sqlglot.tokens", "qualname": "TokenType.TEMPORARY", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TEMPORARY: 'TEMPORARY'>"}, "sqlglot.tokens.TokenType.TOP": {"fullname": "sqlglot.tokens.TokenType.TOP", "modulename": "sqlglot.tokens", "qualname": "TokenType.TOP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TOP: 'TOP'>"}, "sqlglot.tokens.TokenType.THEN": {"fullname": "sqlglot.tokens.TokenType.THEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.THEN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.THEN: 'THEN'>"}, "sqlglot.tokens.TokenType.TRAILING": {"fullname": "sqlglot.tokens.TokenType.TRAILING", "modulename": "sqlglot.tokens", "qualname": "TokenType.TRAILING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TRAILING: 'TRAILING'>"}, "sqlglot.tokens.TokenType.TRUE": {"fullname": "sqlglot.tokens.TokenType.TRUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.TRUE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.TRUE: 'TRUE'>"}, "sqlglot.tokens.TokenType.UNBOUNDED": {"fullname": "sqlglot.tokens.TokenType.UNBOUNDED", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNBOUNDED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNBOUNDED: 'UNBOUNDED'>"}, "sqlglot.tokens.TokenType.UNCACHE": {"fullname": "sqlglot.tokens.TokenType.UNCACHE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNCACHE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNCACHE: 'UNCACHE'>"}, "sqlglot.tokens.TokenType.UNION": {"fullname": "sqlglot.tokens.TokenType.UNION", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNION", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNION: 'UNION'>"}, "sqlglot.tokens.TokenType.UNLOGGED": {"fullname": "sqlglot.tokens.TokenType.UNLOGGED", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNLOGGED", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNLOGGED: 'UNLOGGED'>"}, "sqlglot.tokens.TokenType.UNNEST": {"fullname": "sqlglot.tokens.TokenType.UNNEST", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNNEST", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNNEST: 'UNNEST'>"}, "sqlglot.tokens.TokenType.UNPIVOT": {"fullname": "sqlglot.tokens.TokenType.UNPIVOT", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNPIVOT", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNPIVOT: 'UNPIVOT'>"}, "sqlglot.tokens.TokenType.UPDATE": {"fullname": "sqlglot.tokens.TokenType.UPDATE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UPDATE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UPDATE: 'UPDATE'>"}, "sqlglot.tokens.TokenType.USE": {"fullname": "sqlglot.tokens.TokenType.USE", "modulename": "sqlglot.tokens", "qualname": "TokenType.USE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.USE: 'USE'>"}, "sqlglot.tokens.TokenType.USING": {"fullname": "sqlglot.tokens.TokenType.USING", "modulename": "sqlglot.tokens", "qualname": "TokenType.USING", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.USING: 'USING'>"}, "sqlglot.tokens.TokenType.VALUES": {"fullname": "sqlglot.tokens.TokenType.VALUES", "modulename": "sqlglot.tokens", "qualname": "TokenType.VALUES", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VALUES: 'VALUES'>"}, "sqlglot.tokens.TokenType.VIEW": {"fullname": "sqlglot.tokens.TokenType.VIEW", "modulename": "sqlglot.tokens", "qualname": "TokenType.VIEW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VIEW: 'VIEW'>"}, "sqlglot.tokens.TokenType.VOLATILE": {"fullname": "sqlglot.tokens.TokenType.VOLATILE", "modulename": "sqlglot.tokens", "qualname": "TokenType.VOLATILE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.VOLATILE: 'VOLATILE'>"}, "sqlglot.tokens.TokenType.WHEN": {"fullname": "sqlglot.tokens.TokenType.WHEN", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHEN", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WHEN: 'WHEN'>"}, "sqlglot.tokens.TokenType.WHERE": {"fullname": "sqlglot.tokens.TokenType.WHERE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WHERE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WHERE: 'WHERE'>"}, "sqlglot.tokens.TokenType.WINDOW": {"fullname": "sqlglot.tokens.TokenType.WINDOW", "modulename": "sqlglot.tokens", "qualname": "TokenType.WINDOW", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WINDOW: 'WINDOW'>"}, "sqlglot.tokens.TokenType.WITH": {"fullname": "sqlglot.tokens.TokenType.WITH", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITH", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WITH: 'WITH'>"}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"fullname": "sqlglot.tokens.TokenType.WITH_TIME_ZONE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITH_TIME_ZONE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WITH_TIME_ZONE: 'WITH_TIME_ZONE'>"}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"fullname": "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITH_LOCAL_TIME_ZONE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WITH_LOCAL_TIME_ZONE: 'WITH_LOCAL_TIME_ZONE'>"}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"fullname": "sqlglot.tokens.TokenType.WITHIN_GROUP", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITHIN_GROUP", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WITHIN_GROUP: 'WITHIN_GROUP'>"}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"fullname": "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE", "modulename": "sqlglot.tokens", "qualname": "TokenType.WITHOUT_TIME_ZONE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.WITHOUT_TIME_ZONE: 'WITHOUT_TIME_ZONE'>"}, "sqlglot.tokens.TokenType.UNIQUE": {"fullname": "sqlglot.tokens.TokenType.UNIQUE", "modulename": "sqlglot.tokens", "qualname": "TokenType.UNIQUE", "kind": "variable", "doc": "

\n", "default_value": " = <TokenType.UNIQUE: 'UNIQUE'>"}, "sqlglot.tokens.Token": {"fullname": "sqlglot.tokens.Token", "modulename": "sqlglot.tokens", "qualname": "Token", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Token.__init__": {"fullname": "sqlglot.tokens.Token.__init__", "modulename": "sqlglot.tokens", "qualname": "Token.__init__", "kind": "function", "doc": "

\n", "signature": "(\ttoken_type: sqlglot.tokens.TokenType,\ttext: str,\tline: int = 1,\tcol: int = 1,\tcomments: List[str] = [])"}, "sqlglot.tokens.Token.number": {"fullname": "sqlglot.tokens.Token.number", "modulename": "sqlglot.tokens", "qualname": "Token.number", "kind": "function", "doc": "

Returns a NUMBER token with number as its text.

\n", "signature": "(cls, number: int) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.string": {"fullname": "sqlglot.tokens.Token.string", "modulename": "sqlglot.tokens", "qualname": "Token.string", "kind": "function", "doc": "

Returns a STRING token with string as its text.

\n", "signature": "(cls, string: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.identifier": {"fullname": "sqlglot.tokens.Token.identifier", "modulename": "sqlglot.tokens", "qualname": "Token.identifier", "kind": "function", "doc": "

Returns an IDENTIFIER token with identifier as its text.

\n", "signature": "(cls, identifier: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Token.var": {"fullname": "sqlglot.tokens.Token.var", "modulename": "sqlglot.tokens", "qualname": "Token.var", "kind": "function", "doc": "

Returns an VAR token with var as its text.

\n", "signature": "(cls, var: str) -> sqlglot.tokens.Token:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer": {"fullname": "sqlglot.tokens.Tokenizer", "modulename": "sqlglot.tokens", "qualname": "Tokenizer", "kind": "class", "doc": "

\n"}, "sqlglot.tokens.Tokenizer.__init__": {"fullname": "sqlglot.tokens.Tokenizer.__init__", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.__init__", "kind": "function", "doc": "

\n", "signature": "()"}, "sqlglot.tokens.Tokenizer.reset": {"fullname": "sqlglot.tokens.Tokenizer.reset", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.reset", "kind": "function", "doc": "

\n", "signature": "(self) -> None:", "funcdef": "def"}, "sqlglot.tokens.Tokenizer.tokenize": {"fullname": "sqlglot.tokens.Tokenizer.tokenize", "modulename": "sqlglot.tokens", "qualname": "Tokenizer.tokenize", "kind": "function", "doc": "

Returns a list of tokens corresponding to the SQL string sql.

\n", "signature": "(self, sql: str) -> List[sqlglot.tokens.Token]:", "funcdef": "def"}, "sqlglot.transforms": {"fullname": "sqlglot.transforms", "modulename": "sqlglot.transforms", "kind": "module", "doc": "

\n"}, "sqlglot.transforms.unalias_group": {"fullname": "sqlglot.transforms.unalias_group", "modulename": "sqlglot.transforms", "qualname": "unalias_group", "kind": "function", "doc": "

Replace references to select aliases in GROUP BY clauses.

\n\n
Example:
\n\n
\n
\n
>>> import sqlglot\n>>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()\n'SELECT a AS b FROM x GROUP BY 1'\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.eliminate_distinct_on": {"fullname": "sqlglot.transforms.eliminate_distinct_on", "modulename": "sqlglot.transforms", "qualname": "eliminate_distinct_on", "kind": "function", "doc": "

Convert SELECT DISTINCT ON statements to a subquery with a window function.

\n\n

This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.

\n\n
Arguments:
\n\n
    \n
  • expression: the expression that will be transformed.
  • \n
\n\n
Returns:
\n\n
\n

The transformed expression.

\n
\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.remove_precision_parameterized_types": {"fullname": "sqlglot.transforms.remove_precision_parameterized_types", "modulename": "sqlglot.transforms", "qualname": "remove_precision_parameterized_types", "kind": "function", "doc": "

Some dialects only allow the precision for parameterized types to be defined in the DDL and not in other expressions.\nThis transforms removes the precision from parameterized types in expressions.

\n", "signature": "(\texpression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression:", "funcdef": "def"}, "sqlglot.transforms.preprocess": {"fullname": "sqlglot.transforms.preprocess", "modulename": "sqlglot.transforms", "qualname": "preprocess", "kind": "function", "doc": "

Creates a new transform by chaining a sequence of transformations and converts the resulting\nexpression to SQL, using an appropriate Generator.TRANSFORMS function.

\n\n
Arguments:
\n\n
    \n
  • transforms: sequence of transform functions. These will be called in order.
  • \n
  • to_sql: final transform that converts the resulting expression to a SQL string.
  • \n
\n\n
Returns:
\n\n
\n

Function that can be used as a generator transform.

\n
\n", "signature": "(\ttransforms: List[Callable[[sqlglot.expressions.Expression], sqlglot.expressions.Expression]],\tto_sql: Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]:", "funcdef": "def"}, "sqlglot.transforms.delegate": {"fullname": "sqlglot.transforms.delegate", "modulename": "sqlglot.transforms", "qualname": "delegate", "kind": "function", "doc": "

Create a new method that delegates to attr. This is useful for creating Generator.TRANSFORMS\nfunctions that delegate to existing generator methods.

\n", "signature": "(attr: str) -> Callable:", "funcdef": "def"}, "sqlglot.trie": {"fullname": "sqlglot.trie", "modulename": "sqlglot.trie", "kind": "module", "doc": "

\n"}, "sqlglot.trie.new_trie": {"fullname": "sqlglot.trie.new_trie", "modulename": "sqlglot.trie", "qualname": "new_trie", "kind": "function", "doc": "

Creates a new trie out of a collection of keywords.

\n\n

The trie is represented as a sequence of nested dictionaries keyed by either single character\nstrings, or by 0, which is used to designate that a keyword is in the trie.

\n\n
Example:
\n\n
\n
\n
>>> new_trie(["bla", "foo", "blab"])\n{'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • keywords: the keywords to create the trie from.
  • \n
\n\n
Returns:
\n\n
\n

The trie corresponding to keywords.

\n
\n", "signature": "(keywords: Iterable[Sequence[Hashable]]) -> Dict:", "funcdef": "def"}, "sqlglot.trie.in_trie": {"fullname": "sqlglot.trie.in_trie", "modulename": "sqlglot.trie", "qualname": "in_trie", "kind": "function", "doc": "

Checks whether a key is in a trie.

\n\n
Examples:
\n\n
\n
\n
>>> in_trie(new_trie(["cat"]), "bob")\n(0, {'c': {'a': {'t': {0: True}}}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "ca")\n(1, {'t': {0: True}})\n
\n
\n \n
\n
>>> in_trie(new_trie(["cat"]), "cat")\n(2, {0: True})\n
\n
\n
\n\n
Arguments:
\n\n
    \n
  • trie: the trie to be searched.
  • \n
  • key: the target key.
  • \n
\n\n
Returns:
\n\n
\n

A pair (value, subtrie), where subtrie is the sub-trie we get at the point where the search stops, and value\n is either 0 (search was unsuccessful), 1 (value is a prefix of a keyword in trie) or 2 (key is intrie`).

\n
\n", "signature": "(trie: Dict, key: Sequence[Hashable]) -> Tuple[int, Dict]:", "funcdef": "def"}}, "docInfo": {"sqlglot": {"qualname": 0, "fullname": 1, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 5685}, "sqlglot.pretty": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 2, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.schema": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.parse": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 124, "bases": 0, "doc": 84}, "sqlglot.parse_one": {"qualname": 2, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 198, "bases": 0, "doc": 99}, "sqlglot.transpile": {"qualname": 1, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 239, "bases": 0, "doc": 177}, "sqlglot.dataframe": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3543}, "sqlglot.dataframe.sql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 208, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.SparkSession.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame": {"qualname": 1, "fullname": 4, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 259, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.select": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.where": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 86, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.agg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 48, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 180, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, "sqlglot.dataframe.sql.DataFrame.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 120, "bases": 0, "doc": 44}, "sqlglot.dataframe.sql.DataFrame.union": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 
3}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersect": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.distinct": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.dropna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.fillna": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 100}, "sqlglot.dataframe.sql.DataFrame.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 217, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 80, "bases": 0, "doc": 
3}, "sqlglot.dataframe.sql.DataFrame.limit": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.hint": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.repartition": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.cache": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrame.persist": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 20}, "sqlglot.dataframe.sql.GroupedData": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.agg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.count": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.mean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.avg": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.max": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.min": 
{"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.sum": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.GroupedData.pivot": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 63, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_col": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 71, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_cols": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 98, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 123, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 92, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.binary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 85, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.unary_op": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ensure_literal": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.copy": {"qualname": 
2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.set_table_name": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.alias": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.when": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.otherwise": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNull": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isNotNull": {"qualname": 2, "fullname": 5, "annotation": 0, 
"default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.cast": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 53, "bases": 0, "doc": 27}, "sqlglot.dataframe.sql.Column.startswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.endswith": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.rlike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.like": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.ilike": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.substr": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 121, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.isin": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 81, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 97, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Column.over": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, 
"doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 143, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 177, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.Window.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 38, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, 
"sqlglot.dataframe.sql.WindowSpec.orderBy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 104, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameReader.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 70, "bases": 0, "doc": 3}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 71, 
"bases": 0, "doc": 3}, "sqlglot.dialects": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 764}, "sqlglot.dialects.bigquery": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 7}, "sqlglot.dialects.bigquery.BigQuery": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 
0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks.Databricks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.databricks.Databricks.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.databricks.Databricks.Generator": {"qualname": 2, 
"fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 495}, "sqlglot.dialects.dialect": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 5}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.HIVE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"qualname": 2, "fullname": 5, "annotation": 0, 
"default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SPARK": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TRINO": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TSQL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.DRILL": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 105, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.format_time": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, 
"signature": 70, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parse_into": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 126, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 50, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.transpile": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 37, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.Dialect.generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.rename_func": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 58, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.if_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.inline_array_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_ilike_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, 
"signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"qualname": 5, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_tablesample_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_pivot_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_trycast_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.no_properties_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.str_position_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.struct_extract_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.var_map_sql": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 91, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 71}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 46}, "sqlglot.dialects.dialect.parse_date_delta": {"qualname": 3, "fullname": 6, "annotation": 0, 
"default_value": 0, "signature": 78, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.locate_to_strposition": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.timestrtotime_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.datestrtodate_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.dialect.trim_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.dialects.drill": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.drill.if_sql": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 59}, "sqlglot.dialects.drill.Drill": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.drill.Drill.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.drill.Drill.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.drill.Drill.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.drill.Drill.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, 
"doc": 3}, "sqlglot.dialects.duckdb": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.hive": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.hive.Hive.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.hive.Hive.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.hive.Hive.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"qualname": 4, "fullname": 7, 
"annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.mysql.MySQL.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.mysql.MySQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.oracle.Oracle.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, 
"sqlglot.dialects.oracle.Oracle.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.postgres.Postgres": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.postgres.Postgres.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.postgres.Postgres.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.presto": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, 
"sqlglot.dialects.presto.Presto.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.presto.Presto.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.presto.Presto.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.presto.Presto.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift.Redshift": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.redshift.Redshift.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.redshift.Redshift.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 495}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 58}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 
0, "doc": 19}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 16}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 75}, "sqlglot.dialects.snowflake": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 66}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 125}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.dialects.spark": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.spark.Spark": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.spark.Spark.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.spark.Spark.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 174}, "sqlglot.dialects.spark.Spark.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 495}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.spark.Spark.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.sqlite": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.sqlite.SQLite.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 495}, "sqlglot.dialects.tableau": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tableau.Tableau.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.tableau.Tableau.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.teradata": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata": {"qualname": 1, 
"fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.teradata.Teradata.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.teradata.Teradata.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.trino": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.trino.Trino": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.trino.Trino.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.trino.Trino.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 495}, "sqlglot.dialects.trino.Trino.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 5, "doc": 3}, "sqlglot.dialects.tsql": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"qualname": 6, "fullname": 9, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL": {"qualname": 1, "fullname": 4, "annotation": 
0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.dialects.tsql.TSQL.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 174}, "sqlglot.dialects.tsql.TSQL.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.diff": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 6444}, "sqlglot.diff.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Insert.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Remove": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Remove.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Move": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.diff.Move.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.diff.Update": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.diff.Update.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.Keep": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.diff.Keep.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.diff.diff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 135, "bases": 0, "doc": 249}, "sqlglot.diff.ChangeDistiller": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 46}, "sqlglot.diff.ChangeDistiller.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.diff.ChangeDistiller.diff": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 141, "bases": 0, "doc": 3}, "sqlglot.errors": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.errors.ErrorLevel": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.errors.ErrorLevel.IGNORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.WARN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 6}, "sqlglot.errors.ErrorLevel.RAISE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 12}, "sqlglot.errors.SqlglotError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, 
"doc": 11}, "sqlglot.errors.UnsupportedError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ParseError.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.errors.ParseError.new": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 207, "bases": 0, "doc": 3}, "sqlglot.errors.TokenError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.OptimizeError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.SchemaError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.ExecuteError": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 11}, "sqlglot.errors.concat_messages": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.errors.merge_errors": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.executor": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 2950}, "sqlglot.executor.execute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 197, "bases": 0, "doc": 115}, "sqlglot.executor.context": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 64}, 
"sqlglot.executor.context.Context.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 66, "bases": 0, "doc": 21}, "sqlglot.executor.context.Context.eval": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.eval_tuple": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.table_iter": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.filter": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_row": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_index": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.context.Context.set_range": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.executor.env": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.env.reverse_key.__init__": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.env.filter_nulls": 
{"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.executor.env.null_if_any": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 13, "bases": 0, "doc": 59}, "sqlglot.executor.env.str_position": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.env.substring": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.executor.env.cast": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.env.ordered": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.executor.env.interval": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.execute": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.generate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 16}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 15}, "sqlglot.executor.python.PythonExecutor.context": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 
0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.table": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.static": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_table": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.join": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.hash_join": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.aggregate": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.PythonExecutor.set_operation": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.executor.python.Python": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 4, "doc": 3}, "sqlglot.executor.python.Python.__init__": {"qualname": 3, 
"fullname": 6, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.executor.python.Python.Tokenizer": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 3}, "sqlglot.executor.python.Python.Generator": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 495}, "sqlglot.executor.table": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.add_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.append": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.executor.table.Table.pop": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.TableIter.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.executor.table.RangeReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 9, "bases": 0, "doc": 3}, "sqlglot.executor.table.RowReader": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.executor.table.RowReader.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.executor.table.Tables": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 87}, "sqlglot.executor.table.ensure_tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.expressions": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 346}, "sqlglot.expressions.Expression.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Expression.this": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.text": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 32}, "sqlglot.expressions.Expression.is_string": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_number": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.is_int": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.alias": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.expressions.Expression.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Expression.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 10}, "sqlglot.expressions.Expression.append": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 54}, "sqlglot.expressions.Expression.set": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 45}, "sqlglot.expressions.Expression.depth": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 65}, "sqlglot.expressions.Expression.find_all": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 63}, "sqlglot.expressions.Expression.find_ancestor": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 45}, "sqlglot.expressions.Expression.parent_select": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 8}, "sqlglot.expressions.Expression.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 89}, "sqlglot.expressions.Expression.dfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.bfs": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 33}, "sqlglot.expressions.Expression.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 
11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.unalias": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 12}, "sqlglot.expressions.Expression.unnest_operands": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.flatten": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 28}, "sqlglot.expressions.Expression.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 99, "bases": 0, "doc": 61}, "sqlglot.expressions.Expression.transform": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 111}, "sqlglot.expressions.Expression.replace": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 83}, "sqlglot.expressions.Expression.pop": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 9}, "sqlglot.expressions.Expression.assert_is": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 17, "bases": 0, "doc": 162}, "sqlglot.expressions.Expression.error_messages": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 79}, "sqlglot.expressions.Expression.dump": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 11}, "sqlglot.expressions.Expression.load": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 17}, "sqlglot.expressions.Condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Condition.and_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 173}, 
"sqlglot.expressions.Condition.or_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 173}, "sqlglot.expressions.Condition.not_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 88}, "sqlglot.expressions.Predicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 13}, "sqlglot.expressions.DerivedTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unionable.union": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 211}, "sqlglot.expressions.Unionable.intersect": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 210}, "sqlglot.expressions.Unionable.except_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 211}, "sqlglot.expressions.UDTF": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Cache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Uncache": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Create": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Describe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Set": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.SetItem": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Show": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UserDefinedFunction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSet": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.With": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithinGroup": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TableAlias": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.HexString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ByteString": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Column.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, 
"sqlglot.expressions.ColumnDef": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlterColumn": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RenameTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ColumnConstraintKind": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CheckColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CommentColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefaultColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EncodeColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NotNullColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.PrimaryKeyColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UniqueColumnConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Constraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Drop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Filter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Check": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Directory": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ForeignKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PrimaryKey": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unique": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Into": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.From": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Having": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hint": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JoinHint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Identifier.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Index": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Insert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Introducer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.National": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LoadData": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Partition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Fetch": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Group": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lambda": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Limit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Literal.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.expressions.Literal.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Join.on": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 252}, "sqlglot.expressions.Join.using": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 254}, "sqlglot.expressions.Lateral": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MatchRecognize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Final": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Offset": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Order": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cluster": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distribute": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ordered": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Property": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlgorithmProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DefinerProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SqlSecurityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TableFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PartitionedByProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FileFormatProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortKeyProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DistStyleProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LikeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LocationProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EngineProperty": {"qualname": 1, 
"fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AutoIncrementProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CharacterSetProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CollateProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SchemaCommentProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReturnsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LanguageProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ExecuteAsProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VolatilityProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatDelimitedProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowFormatSerdeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SerdeProperties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FallbackProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WithJournalTableProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 
0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JournalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AfterJournalProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ChecksumProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.FreespaceProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.MergeBlockRatioProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataBlocksizeProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BlockCompressionProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IsolatedLoadingProperty": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Properties.Location": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.Properties.from_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.expressions.Qualify": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Return": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reference": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tuple": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Subqueryable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Subqueryable.subquery": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 213}, "sqlglot.expressions.Subqueryable.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 3}, "sqlglot.expressions.Subqueryable.with_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 319}, 
"sqlglot.expressions.Table": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SystemTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Union.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 234}, "sqlglot.expressions.Except": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unnest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Var": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lock": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Select.from_": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 238}, "sqlglot.expressions.Select.group_by": {"qualname": 3, 
"fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 287}, "sqlglot.expressions.Select.order_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 262}, "sqlglot.expressions.Select.sort_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 262}, "sqlglot.expressions.Select.cluster_by": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 262}, "sqlglot.expressions.Select.limit": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 231}, "sqlglot.expressions.Select.offset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 231}, "sqlglot.expressions.Select.select": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 208}, "sqlglot.expressions.Select.lateral": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 247}, "sqlglot.expressions.Select.join": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 118, "bases": 0, "doc": 628}, "sqlglot.expressions.Select.where": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 265}, "sqlglot.expressions.Select.having": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 287}, "sqlglot.expressions.Select.window": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 3}, "sqlglot.expressions.Select.distinct": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 151}, "sqlglot.expressions.Select.ctas": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, 
"doc": 236}, "sqlglot.expressions.Select.lock": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 61, "bases": 0, "doc": 331}, "sqlglot.expressions.Subquery": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Subquery.unnest": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 8}, "sqlglot.expressions.Subquery.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.TableSample": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Tag": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 15}, "sqlglot.expressions.Pivot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Window": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WindowSpec": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Where": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Star.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Parameter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SessionParameter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.Placeholder": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Boolean": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DataType.Type": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.expressions.DataType.Type.CHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARBINARY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TINYINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGINT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.FLOAT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DOUBLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DECIMAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSON": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.JSONB": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.INTERVAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 
9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.ARRAY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MAP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UUID": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.STRUCT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULLABLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, 
"bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.HSTORE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SUPER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.XML": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.MONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.IMAGE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.VARIANT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.expressions.DataType.Type.OBJECT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.NULL": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.build": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 157, "bases": 0, "doc": 3}, "sqlglot.expressions.DataType.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.expressions.PseudoType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StructKwarg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SubqueryPredicate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.All": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Any": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exists": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Command": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Transaction": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Commit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, 
"doc": 3}, "sqlglot.expressions.Rollback": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AlterTable": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AddConstraint": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DropPartition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Binary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Add": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Connector": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.And": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Or": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseAnd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseLeftShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseRightShift": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseXor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, 
"sqlglot.expressions.Div": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Dot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DPipe": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.EQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.NullSafeNEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Distance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Escape": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Glob": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.GTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.ILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.IntDiv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Is": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Kwarg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 
0, "signature": 0, "bases": 1, "doc": 12}, "sqlglot.expressions.Like": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LT": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.LTE": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Mod": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Mul": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NEQ": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.SimilarTo": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Slice": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unary": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.BitwiseNot": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Not": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.Neg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Alias.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Aliases": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AtTimeZone": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Between": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Bracket": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Distinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.In": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeUnit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 10}, "sqlglot.expressions.TimeUnit.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Interval": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IgnoreNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RespectNulls": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Func": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 128}, "sqlglot.expressions.Func.from_arg_list": {"qualname": 4, "fullname": 6, "annotation": 0, 
"default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.sql_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.sql_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.Func.default_parser_mappings": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.expressions.AggFunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Abs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Anonymous": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxDistinct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Array": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GenerateSeries": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAll": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayAny": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayConcat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayContains": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayFilter": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySize": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArraySum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ArrayUnionAgg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Avg": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.AnyValue": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Case": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Cast.output_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 210}, "sqlglot.expressions.Cast.is_type": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.expressions.Collate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TryCast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ceil": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, 
"doc": 3}, "sqlglot.expressions.Coalesce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Concat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ConcatWs": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Count": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentDatetime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.CurrentTimestamp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DatetimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, 
"sqlglot.expressions.DatetimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DatetimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DayOfWeek": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DayOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.WeekOfYear": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LastDateOfMonth": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Extract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimestampAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimestampTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeSub": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, 
"sqlglot.expressions.TimeDiff": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TimeTrunc": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.DateFromParts": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DateToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Day": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Decode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.DiToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Encode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Exp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Explode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Floor": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Greatest": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.GroupConcat": {"qualname": 1, 
"fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Hex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.If": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.IfNull": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Initcap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBContains": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.JSONExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.JSONBExtractScalar": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Least": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Length": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Levenshtein": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Ln": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 
0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Log10": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.LogicalOr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Lower": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Map": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VarMap": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Matches": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 22}, "sqlglot.expressions.Max": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Min": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Month": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Nvl2": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Posexplode": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Pow": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.PercentileCont": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.PercentileDisc": 
{"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Quantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Quantiles": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.QuantileIf": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ApproxQuantile": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.ReadCSV": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Reduce": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpLike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpILike": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RegexpSplit": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Repeat": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Round": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.RowNumber": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SafeDivide": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SetAgg": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.SortArray": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Split": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Substring": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrPosition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.NumberToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Struct": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StructExtract": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sum": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Sqrt": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Stddev": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.StddevPop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, 
"bases": 1, "doc": 3}, "sqlglot.expressions.StddevSamp": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TimeStrToUnix": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Trim": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsAdd": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 3}, "sqlglot.expressions.TsOrDsToDateStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDsToDate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.TsOrDiToDi": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Unhex": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 
3}, "sqlglot.expressions.UnixToTime": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.UnixToTimeStr": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Upper": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Variance": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.VariancePop": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Week": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Year": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Use": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.Merge": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.When": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.expressions.maybe_parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 246, "bases": 0, "doc": 218}, "sqlglot.expressions.union": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 234}, "sqlglot.expressions.intersect": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 234}, "sqlglot.expressions.except_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 235}, "sqlglot.expressions.select": {"qualname": 1, 
"fullname": 3, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 212}, "sqlglot.expressions.from_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 211}, "sqlglot.expressions.update": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 271}, "sqlglot.expressions.delete": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 156}, "sqlglot.expressions.condition": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 334}, "sqlglot.expressions.and_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 180}, "sqlglot.expressions.or_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 43, "bases": 0, "doc": 180}, "sqlglot.expressions.not_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 152}, "sqlglot.expressions.paren": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.expressions.to_identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 53}, "sqlglot.expressions.to_interval": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 16}, "sqlglot.expressions.to_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 66}, "sqlglot.expressions.to_column": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 62}, "sqlglot.expressions.alias_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 218, "bases": 0, "doc": 292}, "sqlglot.expressions.subquery": {"qualname": 1, "fullname": 3, "annotation": 0, 
"default_value": 0, "signature": 38, "bases": 0, "doc": 197}, "sqlglot.expressions.column": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 47}, "sqlglot.expressions.cast": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 102, "bases": 0, "doc": 122}, "sqlglot.expressions.table_": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 58}, "sqlglot.expressions.values": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 131, "bases": 0, "doc": 169}, "sqlglot.expressions.rename_table": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 75, "bases": 0, "doc": 50}, "sqlglot.expressions.convert": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 54}, "sqlglot.expressions.replace_children": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 18}, "sqlglot.expressions.column_table_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 129}, "sqlglot.expressions.table_name": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 152}, "sqlglot.expressions.replace_tables": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 176}, "sqlglot.expressions.replace_placeholders": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 25, "bases": 0, "doc": 216}, "sqlglot.expressions.expand": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 84, "bases": 0, "doc": 200}, "sqlglot.expressions.func": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 122, "bases": 0, "doc": 272}, "sqlglot.expressions.true": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 8}, "sqlglot.expressions.false": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 8}, "sqlglot.expressions.null": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 7}, "sqlglot.generator": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.generator.Generator": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 495}, "sqlglot.generator.Generator.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 300, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generate": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 28}, "sqlglot.generator.Generator.unsupported": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sep": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.seg": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pad_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.maybe_comment": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.wrap": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.no_identify": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.normalize_func": 
{"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.indent": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 106, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sql": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uncache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cache_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.characterset_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.column_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columndef_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.columnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.encodecolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 20, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.create_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.describe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.prepend_ctes": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablealias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitstring_sql": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hexstring_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datatype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.directory_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.delete_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.drop_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.except_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fetch_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.filter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.hint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.index_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.identifier_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.national_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.root_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.properties": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 117, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.with_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.locate_properties": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.property_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.likeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.fallbackproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.journalproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.freespaceproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.checksumproperty_sql": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.insert_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intersect_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.introducer_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.pseudotype_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.table_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 55, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tablesample_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.pivot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tuple_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.update_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.values_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.var_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.into_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.from_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.group_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.having_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.join_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lambda_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 59, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lateral_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.limit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.offset_sql": {"qualname": 3, 
"fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lock_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.literal_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.loaddata_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.null_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.boolean_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.order_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 51, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cluster_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distribute_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sort_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ordered_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.matchrecognize_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.query_modifiers": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 46, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.select_sql": {"qualname": 3, "fullname": 5, "annotation": 0, 
"default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.schema_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.star_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.structkwarg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.parameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sessionparameter_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.placeholder_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.subquery_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.qualify_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.union_op": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.unnest_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.where_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.window_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.partition_by_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 52, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.window_spec_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.withingroup_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.between_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bracket_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.all_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.any_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.exists_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.case_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.constraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.extract_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trim_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.concat_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 
3}, "sqlglot.generator.Generator.check_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.foreignkey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.primarykey_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.unique_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.if_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.in_unnest_op": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.interval_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.return_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.reference_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.anonymous_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.paren_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.not_sql": 
{"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.alias_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.aliases_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.attimezone_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.add_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.and_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.connector_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseand_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisenot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiseor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.bitwisexor_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.cast_sql": {"qualname": 
3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.currentdate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.collate_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.command_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.transaction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.commit_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.rollback_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altercolumn_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.renametable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.altertable_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.droppartition_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.addconstraint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distinct_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ignorenulls_sql": {"qualname": 
3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.respectnulls_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.intdiv_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dpipe_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.div_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.distance_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.dot_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.eq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.escape_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.glob_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.gte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.ilike_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.is_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, 
"signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.like_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.similarto_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lt_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.lte_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mod_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.mul_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.neq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeeq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.nullsafeneq_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.or_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.slice_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.sub_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.trycast_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.use_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.binary": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.function_fallback_sql": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_args": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 54, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.text_width": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.format_time": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.expressions": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 138, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.op_expressions": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 65, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.naked_property": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.set_operation": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 44, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.tag_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.token_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, 
"sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.joinhint_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.kwarg_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.when_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.generator.Generator.merge_sql": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.helper": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.helper.AutoName": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 25}, "sqlglot.helper.seq_get": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 27}, "sqlglot.helper.ensure_list": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.ensure_collection": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 66}, "sqlglot.helper.csv": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 36, "bases": 0, "doc": 55}, "sqlglot.helper.subclasses": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 103, "bases": 0, "doc": 84}, "sqlglot.helper.apply_index_offset": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 53, "bases": 0, "doc": 88}, "sqlglot.helper.camel_to_snake_case": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, 
"doc": 16}, "sqlglot.helper.while_changing": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 77, "bases": 0, "doc": 58}, "sqlglot.helper.tsort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 53}, "sqlglot.helper.open_file": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 19}, "sqlglot.helper.csv_reader": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 53}, "sqlglot.helper.find_new_name": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 50}, "sqlglot.helper.object_to_dict": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 26, "bases": 0, "doc": 12}, "sqlglot.helper.split_num_words": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 74, "bases": 0, "doc": 312}, "sqlglot.helper.is_iterable": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 132}, "sqlglot.helper.flatten": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 45, "bases": 0, "doc": 195}, "sqlglot.helper.count_params": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 26}, "sqlglot.helper.dict_depth": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 194}, "sqlglot.helper.first": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 16}, "sqlglot.lineage": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.__init__": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 0, "signature": 99, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.walk": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 30, "bases": 0, "doc": 3}, "sqlglot.lineage.Node.to_html": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.lineage.lineage": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 341, "bases": 0, "doc": 114}, "sqlglot.lineage.LineageHTML": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 18}, "sqlglot.lineage.LineageHTML.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 132, "bases": 0, "doc": 3}, "sqlglot.optimizer": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.annotate_types": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 42, "bases": 0, "doc": 331}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 35, "bases": 0, "doc": 3}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.canonicalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 46}, 
"sqlglot.optimizer.canonicalize.add_text_to_concat": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.coerce_type": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 39, "bases": 0, "doc": 3}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 166}, "sqlglot.optimizer.eliminate_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 195}, "sqlglot.optimizer.eliminate_joins.join_condition": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 46}, "sqlglot.optimizer.eliminate_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 276}, "sqlglot.optimizer.expand_laterals": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 188}, "sqlglot.optimizer.expand_multi_table_selects": {"qualname": 0, "fullname": 6, "annotation": 0, "default_value": 0, 
"signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"qualname": 4, "fullname": 10, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 101}, "sqlglot.optimizer.isolate_table_selects": {"qualname": 0, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"qualname": 3, "fullname": 8, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.lower_identities": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.lower_identities.lower_identities": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 172}, "sqlglot.optimizer.merge_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 272}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize.normalize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 32, "bases": 0, "doc": 177}, "sqlglot.optimizer.normalize.normalized": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"qualname": 
2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 171}, "sqlglot.optimizer.normalize.distributive_law": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 39}, "sqlglot.optimizer.optimize_joins": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 132}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.normalize": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.optimize_joins.other_table_names": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.optimizer.optimize": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 498, "bases": 0, "doc": 223}, "sqlglot.optimizer.pushdown_predicates": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 186}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 20}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 31}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 7, "bases": 0, "doc": 3}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 187}, "sqlglot.optimizer.qualify_columns": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 209}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"qualname": 3, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 13}, "sqlglot.optimizer.qualify_tables": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 41, "bases": 0, "doc": 190}, "sqlglot.optimizer.scope": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType": {"qualname": 1, "fullname": 4, "annotation": 0, 
"default_value": 0, "signature": 0, "bases": 2, "doc": 5}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 8, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.CTE": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UNION": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 7, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 248}, "sqlglot.optimizer.scope.Scope.__init__": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 72, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.clear_cache": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.branch": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 12}, "sqlglot.optimizer.scope.Scope.walk": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.scope.Scope.find": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 97}, "sqlglot.optimizer.scope.Scope.find_all": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 
90}, "sqlglot.optimizer.scope.Scope.replace": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 63}, "sqlglot.optimizer.scope.Scope.tables": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.ctes": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 21}, "sqlglot.optimizer.scope.Scope.derived_tables": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 43}, "sqlglot.optimizer.scope.Scope.subqueries": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 44}, "sqlglot.optimizer.scope.Scope.columns": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 36}, "sqlglot.optimizer.scope.Scope.selected_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 69}, "sqlglot.optimizer.scope.Scope.cte_sources": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 24}, "sqlglot.optimizer.scope.Scope.selects": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 52}, "sqlglot.optimizer.scope.Scope.external_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 35}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 22}, "sqlglot.optimizer.scope.Scope.join_hints": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 30}, "sqlglot.optimizer.scope.Scope.source_columns": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 17, "bases": 0, "doc": 52}, 
"sqlglot.optimizer.scope.Scope.is_subquery": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.is_union": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_cte": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 11}, "sqlglot.optimizer.scope.Scope.is_root": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 9}, "sqlglot.optimizer.scope.Scope.is_udtf": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 14}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"qualname": 4, "fullname": 7, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 10}, "sqlglot.optimizer.scope.Scope.rename_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 23, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.add_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.remove_source": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 8}, "sqlglot.optimizer.scope.Scope.traverse": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 28}, "sqlglot.optimizer.scope.Scope.ref_count": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 34}, "sqlglot.optimizer.scope.traverse_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 330}, 
"sqlglot.optimizer.scope.build_scope": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 41}, "sqlglot.optimizer.scope.walk_in_scope": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 83}, "sqlglot.optimizer.simplify": {"qualname": 0, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 138}, "sqlglot.optimizer.simplify.rewrite_between": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 31}, "sqlglot.optimizer.simplify.simplify_not": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 25}, "sqlglot.optimizer.simplify.flatten": {"qualname": 1, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 24}, "sqlglot.optimizer.simplify.simplify_connectors": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.remove_compliments": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 19}, "sqlglot.optimizer.simplify.uniq_sort": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 23}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 58}, "sqlglot.optimizer.simplify.simplify_literals": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.simplify_parens": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, 
"sqlglot.optimizer.simplify.remove_where_true": {"qualname": 3, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.always_true": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.is_complement": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 16, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.eval_boolean": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 21, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_date": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.extract_interval": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.date_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.simplify.boolean_literal": {"qualname": 2, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries": {"qualname": 0, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"qualname": 2, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 227}, "sqlglot.optimizer.unnest_subqueries.unnest": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 22, "bases": 0, "doc": 3}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"qualname": 1, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 28, "bases": 0, "doc": 3}, "sqlglot.parser": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.parser.parse_var_map": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.parser.Parser": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 174}, "sqlglot.parser.Parser.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 162, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 11, "bases": 0, "doc": 3}, "sqlglot.parser.Parser.parse": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 87, "bases": 0, "doc": 70}, "sqlglot.parser.Parser.parse_into": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 159, "bases": 0, "doc": 111}, "sqlglot.parser.Parser.check_errors": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 16}, "sqlglot.parser.Parser.raise_error": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 57, "bases": 0, "doc": 22}, "sqlglot.parser.Parser.expression": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 89, "bases": 0, "doc": 74}, "sqlglot.parser.Parser.validate_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 60, "bases": 0, "doc": 57}, "sqlglot.planner": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Plan.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 24, "bases": 0, "doc": 3}, "sqlglot.planner.Step": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.planner.Step.__init__": 
{"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.planner.Step.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Step.add_dependency": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 3}, "sqlglot.planner.Step.to_s": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.planner.Scan": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Scan.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.planner.Scan.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.planner.Join": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Join.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.planner.Join.from_joins": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 96, "bases": 0, "doc": 3}, "sqlglot.planner.Aggregate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Aggregate.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.planner.Sort": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.Sort.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.planner.SetOperation": {"qualname": 1, "fullname": 3, 
"annotation": 0, "default_value": 0, "signature": 0, "bases": 1, "doc": 3}, "sqlglot.planner.SetOperation.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 83, "bases": 0, "doc": 3}, "sqlglot.planner.SetOperation.from_expression": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 90, "bases": 0, "doc": 209}, "sqlglot.schema.Schema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 8}, "sqlglot.schema.Schema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 57}, "sqlglot.schema.Schema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 56}, "sqlglot.schema.Schema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 56}, "sqlglot.schema.Schema.supported_table_args": {"qualname": 4, "fullname": 6, "annotation": 3, "default_value": 0, "signature": 0, "bases": 0, "doc": 16}, "sqlglot.schema.AbstractMappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 2, "doc": 87}, "sqlglot.schema.AbstractMappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 27, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 3}, "sqlglot.schema.AbstractMappingSchema.find": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 88, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 6, "doc": 139}, "sqlglot.schema.MappingSchema.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 128, 
"bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.copy": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 31, "bases": 0, "doc": 3}, "sqlglot.schema.MappingSchema.add_table": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 111, "bases": 0, "doc": 60}, "sqlglot.schema.MappingSchema.column_names": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 56}, "sqlglot.schema.MappingSchema.get_column_type": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 79, "bases": 0, "doc": 56}, "sqlglot.schema.ensure_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 3}, "sqlglot.schema.ensure_column_mapping": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 67, "bases": 0, "doc": 3}, "sqlglot.schema.flatten_schema": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 69, "bases": 0, "doc": 3}, "sqlglot.serde": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.serde.dump": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 12}, "sqlglot.serde.load": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 149, "bases": 0, "doc": 16}, "sqlglot.time": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.time.format_time": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 73, "bases": 0, "doc": 108}, "sqlglot.tokens": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 3, "doc": 5}, "sqlglot.tokens.TokenType.L_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_PAREN": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACKET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.L_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.R_BRACE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLUS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DCOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMICOLON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.STAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BACKSLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SLASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEQ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DPIPE": 
{"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CARET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TILDA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FARROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LR_ARROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOLLAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARAMETER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.NATIONAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_START": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BLOCK_END": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SPACE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BREAK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NUMBER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SCHEMA": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIT_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.tokens.TokenType.HEX_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BYTE_STRING": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BOOLEAN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TINYINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FLOAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DOUBLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DECIMAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NVARCHAR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.tokens.TokenType.TEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGTEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LONGBLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARBINARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JSONB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATETIME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, 
"bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UUID": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLABLE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GEOMETRY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HLLSKETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HSTORE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SUPER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BIGSERIAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.XML": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MONEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, 
"signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SMALLMONEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWVERSION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IMAGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VARIANT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OBJECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALIAS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALWAYS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ALL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANTI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ANY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.APPLY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ARRAY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, 
"bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ASOF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BEGIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BETWEEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BOTH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BUCKET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CASCADE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CASE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CHECK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, 
"signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COLLATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMAND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMENT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.COMPOUND": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CONSTRAINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CREATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CROSS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CUBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"qualname": 3, "fullname": 5, 
"annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DEFAULT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DELETE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESC": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DESCRIBE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DISTINCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DIV": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.DROP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ELSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ENCODE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.END": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ESCAPE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXCEPT": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXECUTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.EXISTS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FALSE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FETCH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FILTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FINAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FIRST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOLLOWING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FORMAT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FROM": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.FUNCTION": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GENERATED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOB": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GLOBAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUP_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HAVING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.HINT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IDENTITY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IF": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ILIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INDEX": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INNER": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INSERT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTERSECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTERVAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTO": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.INTRODUCER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IRLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.IS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ISNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.JOIN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LANGUAGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LATERAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LAZY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LEADING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LEFT": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LIMIT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOAD_DATA": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.LOCAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MAP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MATERIALIZED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MERGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.MOD": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NATURAL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NEXT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NO_ACTION": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NOTNULL": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULL": {"qualname": 2, "fullname": 4, 
"annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.NULLS_LAST": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OFFSET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ON": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ONLY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OPTIONS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDER_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDERED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ORDINALITY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OUTER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OUT_OF": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.OVERWRITE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION": {"qualname": 2, 
"fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PARTITION_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PERCENT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PIVOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRECEDING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROCEDURE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PROPERTIES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUALIFY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.QUOTE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RANGE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RECURSIVE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.REPLACE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.REFERENCES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RIGHT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.RLIKE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLBACK": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROLLUP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.ROWS": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SELECT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEMI": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SEPARATOR": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SET": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SHOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SOME": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SORTKEY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.SORT_BY": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.STRUCT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TEMPORARY": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TOP": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.THEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TRAILING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.TRUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, 
"doc": 3}, "sqlglot.tokens.TokenType.UNBOUNDED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNCACHE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNION": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNLOGGED": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNNEST": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNPIVOT": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UPDATE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.USING": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VALUES": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VIEW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.VOLATILE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHEN": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WHERE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, 
"sqlglot.tokens.TokenType.WINDOW": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITH": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"qualname": 5, "fullname": 7, "annotation": 0, "default_value": 15, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 11, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 13, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.TokenType.UNIQUE": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 9, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.tokens.Token.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 97, "bases": 0, "doc": 3}, "sqlglot.tokens.Token.number": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.string": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.identifier": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Token.var": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 34, "bases": 0, "doc": 14}, "sqlglot.tokens.Tokenizer": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 
3}, "sqlglot.tokens.Tokenizer.__init__": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 4, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.reset": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 14, "bases": 0, "doc": 3}, "sqlglot.tokens.Tokenizer.tokenize": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 16}, "sqlglot.transforms": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.transforms.unalias_group": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 146}, "sqlglot.transforms.eliminate_distinct_on": {"qualname": 3, "fullname": 5, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 66}, "sqlglot.transforms.remove_precision_parameterized_types": {"qualname": 4, "fullname": 6, "annotation": 0, "default_value": 0, "signature": 40, "bases": 0, "doc": 33}, "sqlglot.transforms.preprocess": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 144, "bases": 0, "doc": 88}, "sqlglot.transforms.delegate": {"qualname": 1, "fullname": 3, "annotation": 0, "default_value": 0, "signature": 19, "bases": 0, "doc": 29}, "sqlglot.trie": {"qualname": 0, "fullname": 2, "annotation": 0, "default_value": 0, "signature": 0, "bases": 0, "doc": 3}, "sqlglot.trie.new_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 29, "bases": 0, "doc": 185}, "sqlglot.trie.in_trie": {"qualname": 2, "fullname": 4, "annotation": 0, "default_value": 0, "signature": 47, "bases": 0, "doc": 299}}, "length": 1667, "save": true}, "index": {"qualname": {"root": {"docs": {"sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 
1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 
1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}}, "df": 76, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}}, "df": 7}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 19}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 9, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 28}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 6, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.helper.count_params": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 5}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}}, "df": 4, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}}, "df": 6}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": 
{"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 5}}}}}}}}, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.schema.Schema": 
{"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}}, "df": 16, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}}, "df": 6}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 35, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, 
"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}}, "df": 7, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 5}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.generator.Generator.window_spec_sql": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 
1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": 
{"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": 
{"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, 
"sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}}, "df": 237, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}}, "df": 7}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 
1}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 23, "s": {"docs": {"sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, 
"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 11, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 1, "p": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.SEED": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, 
"sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}}, "df": 10, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 2}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 2}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 4}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 4, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}}, "df": 4}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.expressions.StructKwarg": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 
0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": 
{"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 12}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 5}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ONLY": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, 
"sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}}, "df": 9}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": 
{"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 6}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 8}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.TRAILING": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}}, "df": 5}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, 
"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 30, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}}, "df": 5}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 7, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TableFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}}, "o": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 
1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 12, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 1, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 
1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 20}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": 
{"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": 
{"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, 
"sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, 
"sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, 
"sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, 
"sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 286}}}}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "s": {"docs": 
{}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 8}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}}, "df": 7}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, 
"sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 4}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 58, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, 
"sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}}, "df": 63, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, 
"sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 7}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, 
"sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 18, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.ILIKE": {"tf": 1}}, "df": 5}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.IfNull": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, 
"sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, 
"sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 5}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 47, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 8}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, 
"df": 2}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": 
{"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}, 
"o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 12}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 6}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 3}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, 
"df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 4}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CASCADE": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 7}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}}, "df": 4}}}}}, "e": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": 
{"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 33, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 5}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": 
{"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 58}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, 
"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 8, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}, 
"d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 8}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": 
{"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}}, "df": 21}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 4}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": 
{"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}}, "df": 6}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": 
{"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 8, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 5}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, 
"sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, 
"sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}}, "df": 4, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, 
"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 9, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": 
{}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AfterJournalProperty": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 4}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.NO_ACTION": 
{"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 6}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}}, "df": 12, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 
1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}}, "df": 9}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": {"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}}, "df": 5}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, 
"sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}}, "df": 9, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, 
"sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}}, "df": 13}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, 
"df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Greatest": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 
1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, 
"sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": 
{"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, 
"sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": 
{"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}}, "df": 266}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.tokens.TokenType.GENERATED": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, 
"sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 4}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, "df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": 
{"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 4}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, 
"t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": 
{"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unique": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, 
"sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}}, "df": 10}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": 
{"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": 
{"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 40, "s": {"docs": {"sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 3}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, 
"sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 8}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}, "s": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 2}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 3}}}, "q": {"docs": {"sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 4}, "s": {"docs": {}, 
"df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 5}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 
1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 10}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 3}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, 
"df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 7}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, 
"sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 2, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, 
"sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": 
{"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.LAZY": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, 
"sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}}, "df": 2}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Least": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.LEADING": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, 
"t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}}, "df": 8}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 4}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, 
"sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}}, "df": 7, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 2}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 2}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 
0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}}, "df": 9}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 6}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 13}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 
0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 5}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BOTH": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.BUCKET": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 12, "s": {"docs": {"sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": 
{"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 5}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": 
{"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 4, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, 
"o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 3}, "g": {"docs": 
{"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 6, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": 
{"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, 
"y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}}, "df": 6}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Quantiles": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.QuantileIf": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}}, "df": 2}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, 
"sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 4}}}}}}, "fullname": {"root": {"docs": {"sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": 
{"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, 
"sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}}, "df": 76, "s": {"docs": {"sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": 
{"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": 
{"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": 
{"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, 
"sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": 
{"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": 
{"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}}, "df": 344, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, 
"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": 
{"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 
1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, 
"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, 
"sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, 
"sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, 
"sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, 
"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Unique": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, 
"sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.TableFormatProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.VolatilityProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.AfterJournalProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, 
"sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, 
"sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": 
{"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.StructKwarg": {"tf": 1}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1}, 
"sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, 
"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, 
"sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, 
"sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.Quantiles": {"tf": 1}, "sqlglot.expressions.QuantileIf": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, 
"sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, 
"sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, 
"sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, 
"sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": 
{"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, 
"sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, 
"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 
1}, "sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": 
{"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 
1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner": {"tf": 1}, "sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, 
"sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, 
"sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, 
"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, 
"sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, 
"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, 
"sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, 
"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, 
"sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}, "sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 1667, "e": {"docs": {}, "df": 0, "r": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.4142135623730951}}, "df": 8}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SqlSecurityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Sqrt": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, 
"sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}}, "df": 27, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.SchemaError": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SchemaCommentProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, 
"sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}}, "df": 6}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": 
{"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 43, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 7}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 
1.4142135623730951}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1.4142135623730951}}, "df": 8, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}}, "df": 5}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Split": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.generator.Generator.window_spec_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}}, "df": 23, "s": {"docs": {"sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}}, "df": 11, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, 
"sqlglot.expressions.SetItem": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.SetAgg": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.serde": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.SerdeProperties": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "p": {"docs": 
{"sqlglot.generator.Generator.sep": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {"sqlglot.generator.Generator.seg": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.SEED": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}}, "df": 10, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.SortArray": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}}, "df": 2}, "b": {"docs": {"sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}}, "df": 2}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 
1}}, "df": 4}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 11}}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.STAR": {"tf": 1}}, "df": 4, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.startswith": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.python.PythonExecutor.static": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}}, "df": 4, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.StructKwarg": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.StructExtract": {"tf": 1}}, "df": 1}}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToDate": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.StrToTime": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.StrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.Stddev": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevPop": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.StddevSamp": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SafeDivide": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}}, "df": 13}}}}}}}, "a": {"docs": {}, "df": 0, "k": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}}, "df": 3}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 2}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": 
{"docs": {"sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 19}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.pretty": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.presto.Presto.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.4142135623730951}}, "df": 8}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 7}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "p": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}}, "df": 19}}}, "y": {"docs": {"sqlglot.expressions.Property": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 9, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.parser": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.reset": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}}, "df": 30}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 3}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, 
"sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}}, "df": 5, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}}, "df": 2}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}}, "df": 6, "t": {"docs": {"sqlglot.expressions.Expression.parent_select": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": 
{"sqlglot.helper.count_params": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.PercentileCont": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.PercentileDisc": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}}, "df": 5}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}}, "df": 4, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}}, "df": 7}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Posexplode": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}}, "df": 2}, "w": {"docs": {"sqlglot.expressions.Pow": {"tf": 1}}, "df": 1}}, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": 
{"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 22, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}}, "df": 17}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "r": {"docs": {"sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner": {"tf": 1}, "sqlglot.planner.Plan": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 21}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, 
"df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}}, "df": 10}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.parse_one": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ONLY": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}}, "df": 6, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}}, "df": 4, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.4142135623730951}}, "df": 10}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}}, "df": 3, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": 
{"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.OptimizeError": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.optimizer": {"tf": 1}, "sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.optimizer": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, 
"sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 123}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 1, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}}, "df": 6}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, 
"t": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 8}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}}, "df": 5}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.transforms": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, 
"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 6}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.TRAILING": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 6}}, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 3}, "e": {"docs": {"sqlglot.trie": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 3}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.true": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.append": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.pop": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}}, "df": 41, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}}, "df": 6}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": 
{"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.TableFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "g": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 2}}, "o": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}}, "df": 12, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 1, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 20}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.TokenError": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, 
"sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 
1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, 
"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 
1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, 
"sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, 
"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, 
"sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}, "sqlglot.tokens.Token": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 297}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, 
"sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, 
"sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, 
"sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, 
"sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, 
"sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, 
"sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, 
"sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 286}}}}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.4142135623730951}}, "df": 10}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TsOrDsToDate": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.expressions.TsOrDiToDi": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.4142135623730951}}, "df": 8}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}}, "df": 5}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time": {"tf": 1}, "sqlglot.time.format_time": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.TimeStrToDate": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeStrToUnix": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimestampTrunc": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimestampAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": 
{"sqlglot.expressions.TimestampSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimestampDiff": {"tf": 1}}, "df": 1}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.TimeSub": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.TimeAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.TimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.TimeTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeToTimeStr": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.TimeToUnix": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}}, "df": 4}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1}}, "df": 1}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}}, "df": 58, "s": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, 
"sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 6}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}}, "df": 3}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 113, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 5}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameReader": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}}, "df": 3}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 7}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}}, "df": 6}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}}, "df": 58}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": 
{"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}}, "df": 8, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DateSub": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DatetimeAdd": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DatetimeSub": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DatetimeDiff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DatetimeTrunc": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.DateTrunc": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DateToDateStr": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": 
{"sqlglot.expressions.DateToDi": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DateAdd": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.DateDiff": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.DateFromParts": {"tf": 1}}, "df": 1}}}}}}}}}}}, "y": {"docs": {"sqlglot.expressions.Day": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.DayOfWeek": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DayOfMonth": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DayOfYear": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, 
"sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 8}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistKeyProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DistStyleProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}}, "df": 57, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.trino": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 202}}}}}}, "v": {"docs": {"sqlglot.expressions.Div": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 
1.4142135623730951}}, "df": 15}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 2}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DiToDate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, 
"sqlglot.dialects.drill": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}}, "df": 9}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}}, "df": 4, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}}, "df": 4}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DerivedTable": {"tf": 1}}, "df": 1}}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": 
{"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DefinerProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Decode": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.duckdb": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}}, "df": 7}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {"sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 7, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, 
"sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.__init__": {"tf": 1}, "sqlglot.planner.Scan.__init__": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}, "sqlglot.planner.Sort.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1}}, "df": 63, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Initcap": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, 
"sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}}, "df": 7}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}}, "df": 5}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}}, "df": 3}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 
0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}}, "df": 18, "n": {"docs": {}, "df": 0, "u": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.4142135623730951}}, "df": 2, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}}, "df": 5}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}}, "df": 6, "n": 
{"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.IfNull": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 6}}}, "y": {"docs": {"sqlglot.generator.Generator.no_identify": {"tf": 1}}, "df": 1}}, 
"t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.lower_identities": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}}, "df": 2}}}, "y": {"docs": {"sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}}, "df": 5, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 6}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, 
"sqlglot.expressions.Coalesce": {"tf": 1}}, "df": 2}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 5}}}, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}}, "df": 47, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 9}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.ColumnConstraintKind": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CollateProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}}, "df": 4}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Command": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}}, "df": 3}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}}, "df": 1}}}}}}, "n": 
{"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}}, "df": 4, "w": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.filter": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.sort": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_row": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_index": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}}, "df": 13}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 6}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}}, "df": 3}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.convert": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}}, "df": 5}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}}, "df": 8, "s": {"docs": 
{"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.expressions.Case": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}}, "df": 4}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CASCADE": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 5}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}}, "df": 8}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}}, "df": 4}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 5}}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}}, "df": 6, "d": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentDatetime": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.CurrentTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.CurrentTimestamp": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 3}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.CharacterSetProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Check": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.replace_children": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot.expressions.Ceil": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}}, "df": 8, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}}, "df": 5}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, 
"p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Aggregate.__init__": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}}, "df": 4, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.expressions.AnyValue": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}}, "df": 6}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.4142135623730951}}, "df": 5}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, 
"sqlglot.expressions.Array": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}}, "df": 5, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayAgg": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.ArrayAll": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ArrayAny": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArrayConcat": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.ArrayContains": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.ArrayFilter": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ArraySize": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ArraySort": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.ArraySum": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ArrayUnionAgg": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, 
"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 6}}}, "g": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 9, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AutoIncrementProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AfterJournalProperty": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Abs": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 4}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, 
"df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}}, "df": 6}}, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}}, "df": 12, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, 
"df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 2}}}}}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.WithJournalTableProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Window": {"tf": 1}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.WindowSpec": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, 
"sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}}, "df": 9}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.text_width": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.Week": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.WeekOfYear": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.wrap": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}}, "df": 6}}}, "l": {"docs": 
{"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FileFormatProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}}, "df": 5}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, 
"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}}, "df": 9, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}}, "df": 3}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}}, "df": 6}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, 
"sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 4}}}}}, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Floor": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}}, "df": 13}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": 
{}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.false": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 6, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.GroupedData": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}}, "df": 10}}}}}}, "c": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.GroupConcat": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Greatest": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": 
{"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unsupported": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.column_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.create_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.drop_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.except_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.hint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.index_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.national_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.property_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.update_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.var_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.into_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.from_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.group_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.having_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.null_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.star_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.union_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.where_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.between_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.all_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.any_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.case_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.concat_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.check_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.unique_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.if_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.return_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.not_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.add_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.and_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.command_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.transaction_sql": {"tf": 
1.4142135623730951}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.div_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.eq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.is_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.like_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.neq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.or_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.use_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.token_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.when_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.4142135623730951}}, "df": 267}}, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": 
{"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.tokens.TokenType.GENERATED": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.GenerateSeries": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 4}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}}, "df": 3, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.expressions.GT": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}}, "df": 3}}}, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Join.__init__": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}}, "df": 15, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 9}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}}, "df": 4, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.JSONBContains": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONBExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtract": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}}, "df": 1}, "q": 
{"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Unique": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 3, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToStr": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UnixToTime": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.UnixToTimeStr": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": 
{"docs": {"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 2}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}}, "df": 12}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}}, "df": 3}}}}}, "k": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Unhex": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}}, "df": 1}}}}}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}}, "df": 7}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Upper": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Use": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, 
"sqlglot.tokens.TokenType.USE": {"tf": 1}}, "df": 3, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 2, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}}, "df": 2}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": 
{"sqlglot.expressions.Exp": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": 
{"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 40, "s": {"docs": {"sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, 
"sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": {"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, 
"sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Unique": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, 
"sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.TableFormatProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.VolatilityProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.AfterJournalProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, 
"sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, 
"sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.StructKwarg": {"tf": 1}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, 
"sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": 
{"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.TryCast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.ConcatWs": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, 
"sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, "sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.Quantiles": {"tf": 1}, "sqlglot.expressions.QuantileIf": {"tf": 1}, "sqlglot.expressions.ApproxQuantile": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 
1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, "sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 
1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 530}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Explode": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}}, "df": 5}}}}, "t": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 7}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.ExecuteAsProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 
1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}, "sqlglot.executor.python": {"tf": 1}, "sqlglot.executor.python.PythonExecutor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table": {"tf": 1}, "sqlglot.executor.table.Table": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.executor.table.TableIter": {"tf": 1}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 60}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Exists": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}}, "df": 3}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 8}}}}, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.endswith": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {"sqlglot.executor.env": {"tf": 1}, "sqlglot.executor.env.reverse_key": {"tf": 1}, 
"sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 10}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.EngineProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.errors": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, 
"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 18}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 5}}}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 3}}}, "q": {"docs": {"sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 4}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}}, "df": 3}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes": {"tf": 1}, 
"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 9}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ELSE": {"tf": 1}}, "df": 1}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}}, "df": 9}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Repeat": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": 
{"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 11}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reduce": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 3}}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Return": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 7}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}}, "df": 2}}}}}, "f": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}}, 
"df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.expressions.ReadCSV": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpLike": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.RegexpILike": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.RegexpSplit": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RowReader": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.RowNumber": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Round": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}}, "df": 2, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "n": {"docs": {"sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.RangeReader": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RIGHT": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 8, "s": {"docs": 
{"sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}}, "df": 1}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}}, "df": 4, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage": {"tf": 1}, "sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 8, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": 
{"sqlglot.expressions.LastDateOfMonth": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LanguageProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "w": {"docs": {"sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 1}, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.LAZY": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 
1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 7, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LocationProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}}, "df": 2}}, "k": {"docs": {"sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}}, "df": 3}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 2}}}}}}, "g": {"1": {"0": {"docs": {"sqlglot.expressions.Log10": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.expressions.Log2": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.expressions.Log": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.LogProperty": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": 
{}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.LogicalOr": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.optimizer.lower_identities": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}}, "df": 3}}}}, "t": {"docs": {"sqlglot.expressions.LT": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Least": {"tf": 1}}, "df": 1}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.LEADING": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Length": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Levenshtein": {"tf": 1}}, "df": 1}}}}}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.tokens.TokenType.LEFT": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.expressions.Ln": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.hive": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 9}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}}, "df": 3}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}}, "df": 4}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 2}}}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}}, "df": 21}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}}, "df": 8, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}}, "df": 2}}}}}}}}}, "a": {"docs": {}, 
"df": 0, "x": {"docs": {"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}}, "df": 2}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 7}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}}, "df": 3, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 2}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Month": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.mysql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1.4142135623730951}}, "df": 10}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.optimizer.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}}, "df": 6}}}}, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 
1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}}, "df": 14}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": 
{}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}}, "df": 5}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 1}}, "y": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}}, "df": 12, "t": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.ByteString": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}}, "df": 6}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BOTH": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 2}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, 
"df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.BUCKET": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}}, "df": 4}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 12, "s": {"docs": {"sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": 
{"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 5}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.National": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}}, "df": 9, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.expressions.NullSafeNEQ": 
{"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 2}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}}, "df": 2}}}}}}, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 4, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.NumberToStr": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}}, "df": 10, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.optimizer.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": 
{"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 8, "d": {"docs": {"sqlglot.optimizer.normalize.normalized": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}}, "df": 3}, "g": {"docs": {"sqlglot.expressions.Neg": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 2}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}}, "df": 2}}}}}}, "l": {"2": {"docs": {"sqlglot.expressions.Nvl2": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 6, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Variance": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.VarMap": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.VolatilityProperty": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.VIEW": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "y": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Qualify": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.optimizer.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}}, "df": 8}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Quantile": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Quantiles": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.QuantileIf": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.QUOTE": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}}, "df": 2}}, "y": {"docs": {"sqlglot.executor.env.reverse_key": {"tf": 1}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}}, "df": 4}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 2}}}}}, "x": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": 
{"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}}, "df": 2}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Year": {"tf": 1}}, "df": 1}}}}, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}}, "df": 4}}}}}}, "annotation": {"root": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}}, "df": 1, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 1}}}}}}}}}}}, "default_value": {"root": {"1": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}}, "df": 1}, "6": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 
1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 
1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 374, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, 
"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, 
"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, 
"sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, 
"sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, 
"sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, 
"sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, 
"sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 373, "e": {"docs": {"sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": 
{}, "df": 0, "n": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}}, "df": 6}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.LOCAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {"sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 1}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.LAZY": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.LEADING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}}, "df": 1}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1.4142135623730951}}, "df": 6}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, 
"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 6}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SPACE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}}, "df": 4}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.SEED": {"tf": 1.4142135623730951}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.SORT_BY": {"tf": 1.4142135623730951}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.SORTKEY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}}, "df": 2, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": 
{"docs": {"sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {"sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1.4142135623730951}}, "df": 1, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.ONLY": {"tf": 1.4142135623730951}}, "df": 1}}}, 
"p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.OPTIONS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.OUT_OF": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}}, "df": 1, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "g": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}, 
"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, 
"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, 
"sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, 
"sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, 
"sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, 
"sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, 
"sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, 
"sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 373, "e": {"docs": {"sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 
1.4142135623730951}}, "df": 2}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.GENERATED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1}, 
"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1}}, "df": 20}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "v": {"docs": {"sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}}, "df": 1}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1.4142135623730951}}, "df": 1, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 1, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.BY_DEFAULT": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SPARK": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.STAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SPACE": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ANTI": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 
1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.MOD": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.TRAILING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 366}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.XML": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.XML": {"tf": 1.4142135623730951}}, "df": 2}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 
1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}, "t": {"docs": {"sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BOTH": {"tf": 1.4142135623730951}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BREAK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}}, "df": 2}}}}, "y": {"docs": {"sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1.4142135623730951}}, "df": 7, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BEGIN": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.BUCKET": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "u": 
{"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.CROSS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.CHECK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1}}, "df": 1}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.COMMA": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMMAND": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMENT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1.4142135623730951}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.COMMIT": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.COMPOUND": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLON": {"tf": 1.4142135623730951}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.COLUMN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.4142135623730951}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.COLLATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CARET": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CACHE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CASCADE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {"sqlglot.tokens.TokenType.CASE": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.CUBE": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_ROW": 
{"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}}, "df": 5}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.4142135623730951}}, "df": 1}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.HINT": {"tf": 1.4142135623730951}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.tokens.TokenType.HASH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}}, "df": 2}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.HAVING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.4142135623730951}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.4142135623730951}}, "df": 4, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.PRECEDING": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}}, "df": 2}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.PARTITION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.PLUS": {"tf": 1.4142135623730951}}, "df": 1}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.PIPE": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.PERCENT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, 
"f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.REPLACE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.4142135623730951}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RANGE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1}}, "df": 2}}, "w": {"docs": {"sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROW": {"tf": 
1.4142135623730951}}, "df": 2, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.ROWS": {"tf": 1.4142135623730951}}, "df": 1}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.RIGHT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.RLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.4142135623730951}}, "df": 3, "a": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.tokens.TokenType.TRAILING": {"tf": 
1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.TRUE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.4142135623730951}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.CHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1}, 
"sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1}, "sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1}, 
"sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.4142135623730951}}, "df": 52}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.tokens.TokenType.TILDA": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.4142135623730951}}, "df": 1, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.L_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1}, "sqlglot.tokens.TokenType.DOT": {"tf": 1}, "sqlglot.tokens.TokenType.DASH": {"tf": 1}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1}, "sqlglot.tokens.TokenType.COLON": {"tf": 1}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1}, "sqlglot.tokens.TokenType.STAR": {"tf": 1}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1}, "sqlglot.tokens.TokenType.LT": {"tf": 1}, "sqlglot.tokens.TokenType.LTE": {"tf": 1}, "sqlglot.tokens.TokenType.GT": {"tf": 1}, "sqlglot.tokens.TokenType.GTE": {"tf": 1}, "sqlglot.tokens.TokenType.NOT": {"tf": 1}, "sqlglot.tokens.TokenType.EQ": {"tf": 1}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1}, "sqlglot.tokens.TokenType.AND": {"tf": 1}, "sqlglot.tokens.TokenType.OR": {"tf": 1}, "sqlglot.tokens.TokenType.AMP": {"tf": 1}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1}, "sqlglot.tokens.TokenType.CARET": {"tf": 1}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1}, "sqlglot.tokens.TokenType.HASH": {"tf": 1}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 
1}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1}, "sqlglot.tokens.TokenType.STRING": {"tf": 1}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1}, "sqlglot.tokens.TokenType.VAR": {"tf": 1}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1}, "sqlglot.tokens.TokenType.INT": {"tf": 1}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1}, "sqlglot.tokens.TokenType.JSON": {"tf": 1}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1}, "sqlglot.tokens.TokenType.TIME": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1}, 
"sqlglot.tokens.TokenType.DATE": {"tf": 1}, "sqlglot.tokens.TokenType.UUID": {"tf": 1}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1}, "sqlglot.tokens.TokenType.XML": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1}, "sqlglot.tokens.TokenType.ALL": {"tf": 1}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1}, "sqlglot.tokens.TokenType.ANY": {"tf": 1}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1}, "sqlglot.tokens.TokenType.ASC": {"tf": 1}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1}, "sqlglot.tokens.TokenType.CASE": {"tf": 1}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1}, 
"sqlglot.tokens.TokenType.COLLATE": {"tf": 1}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1}, "sqlglot.tokens.TokenType.DELETE": {"tf": 1}, "sqlglot.tokens.TokenType.DESC": {"tf": 1}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1}, "sqlglot.tokens.TokenType.DIV": {"tf": 1}, "sqlglot.tokens.TokenType.DROP": {"tf": 1}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1}, "sqlglot.tokens.TokenType.END": {"tf": 1}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1}, "sqlglot.tokens.TokenType.FOR": {"tf": 1}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1}, "sqlglot.tokens.TokenType.FROM": {"tf": 1}, "sqlglot.tokens.TokenType.FULL": {"tf": 1}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1}, 
"sqlglot.tokens.TokenType.GENERATED": {"tf": 1}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1}, "sqlglot.tokens.TokenType.HAVING": {"tf": 1}, "sqlglot.tokens.TokenType.HINT": {"tf": 1}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1}, "sqlglot.tokens.TokenType.IF": {"tf": 1}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IN": {"tf": 1}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1}, "sqlglot.tokens.TokenType.INNER": {"tf": 1}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1}, "sqlglot.tokens.TokenType.INTO": {"tf": 1}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.IS": {"tf": 1}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1}, "sqlglot.tokens.TokenType.MAP": {"tf": 1}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1}, "sqlglot.tokens.TokenType.MOD": {"tf": 1}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULL": {"tf": 1}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1}, 
"sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1}, "sqlglot.tokens.TokenType.ON": {"tf": 1}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1}, "sqlglot.tokens.TokenType.OVER": {"tf": 1}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1}, "sqlglot.tokens.TokenType.RANGE": {"tf": 1}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1}, "sqlglot.tokens.TokenType.ROW": {"tf": 1}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1}, "sqlglot.tokens.TokenType.SEED": {"tf": 1}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1}, "sqlglot.tokens.TokenType.SET": {"tf": 1}, 
"sqlglot.tokens.TokenType.SHOW": {"tf": 1}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1}, "sqlglot.tokens.TokenType.SOME": {"tf": 1}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1}, "sqlglot.tokens.TokenType.TOP": {"tf": 1}, "sqlglot.tokens.TokenType.THEN": {"tf": 1}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1}, "sqlglot.tokens.TokenType.USE": {"tf": 1}, "sqlglot.tokens.TokenType.USING": {"tf": 1}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1}, "sqlglot.tokens.TokenType.WITH": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1}}, "df": 285}}}}}}}, "p": {"docs": {"sqlglot.tokens.TokenType.TOP": {"tf": 1.4142135623730951}}, "df": 1}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.THEN": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4}}}}}}}}}, "q": {"docs": {"sqlglot.tokens.TokenType.EQ": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 2}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.END": {"tf": 1.4142135623730951}}, "df": 2}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ENCODE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ELSE": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.4142135623730951}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.EXISTS": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1.4142135623730951}}, "df": 
2}}}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "n": {"docs": {"sqlglot.tokens.TokenType.IN": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.4142135623730951}}, "df": 2}}}, "t": {"docs": {"sqlglot.expressions.DataType.Type.INT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INT": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.4142135623730951}}, "df": 2}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.INTO": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": 
{"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.INNER": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.INSERT": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.IDENTITY": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "f": {"docs": {"sqlglot.tokens.TokenType.IF": {"tf": 1.4142135623730951}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.ILIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.IS": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.ISNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ErrorLevel.WARN": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.4142135623730951}, 
"sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1.4142135623730951}}, "df": 1}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.WINDOW": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.WHEN": {"tf": 1.4142135623730951}}, "df": 1}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.WHERE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.4142135623730951}}, "df": 2}}}, "k": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UNCACHE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNNEST": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.DataType.Type.UUID": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.4142135623730951}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.UPDATE": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.USE": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.tokens.TokenType.USING": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.DataType.Type.NULL": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.4142135623730951}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.4142135623730951}}, "df": 2}}}}, "s": {"docs": {"sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1.4142135623730951}}, "df": 4, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.NUMBER": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {"sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1.4142135623730951}}, "df": 1, "t": {"docs": {"sqlglot.tokens.TokenType.NOT": {"tf": 1.4142135623730951}}, "df": 1, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.tokens.TokenType.NEQ": {"tf": 1.4142135623730951}}, "df": 1}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.tokens.TokenType.NEXT": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATIONAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.tokens.TokenType.NATURAL": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.VAR": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.4142135623730951}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.VALUES": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": 
{"sqlglot.tokens.TokenType.VIEW": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.DataType.Type.JSON": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.4142135623730951}}, "df": 2, "b": {"docs": {"sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.4142135623730951}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.JOIN": {"tf": 1.4142135623730951}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.4142135623730951}}, "df": 2}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.tokens.TokenType.ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.tokens.TokenType.AND": {"tf": 1.4142135623730951}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.tokens.TokenType.ANTI": {"tf": 1.4142135623730951}}, "df": 1}}, "y": {"docs": {"sqlglot.tokens.TokenType.ANY": {"tf": 1.4142135623730951}}, "df": 1}}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.tokens.TokenType.AMP": {"tf": 1.4142135623730951}}, "df": 1}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot.tokens.TokenType.ALIAS": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.tokens.TokenType.ALTER": {"tf": 1.4142135623730951}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {"sqlglot.tokens.TokenType.ALL": {"tf": 1.4142135623730951}}, "df": 1}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.APPLY": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.tokens.TokenType.ASC": {"tf": 1.4142135623730951}}, "df": 1}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.tokens.TokenType.ASOF": {"tf": 1.4142135623730951}}, "df": 1}}}, "t": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 1}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.4142135623730951}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.4142135623730951}}, "df": 4}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.4142135623730951}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.4142135623730951}}, "df": 2}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.tokens.TokenType.QUOTE": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "signature": {"root": {"0": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5, "x": {"7": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"7": {"5": {"docs": {}, "df": 0, "a": {"9": {"8": {"9": {"4": {"8": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "d": {"8": {"1": {"docs": {}, "df": 0, "f": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "5": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "8": {"2": {"0": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "c": {"1": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "9": {"2": {"4": {"0": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"0": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "b": {"0": {"1": {"6": {"0": {"docs": 
{"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"docs": {}, "df": 0, "e": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "7": {"0": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"2": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"8": {"3": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0, "b": {"9": {"0": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "f": {"8": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "e": {"6": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "d": {"docs": {}, "df": 0, "d": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}, "3": {"5": {"docs": {}, "df": 0, "b": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "docs": {}, "df": 0, "d": {"0": {"0": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "docs": {}, "df": 0}}, "1": {"0": {"0": {"docs": {"sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"8": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "4": {"0": {"7": {"0": {"0": {"3": {"2": {"9": {"2": {"4": {"0": {"3": {"8": {"4": {"docs": 
{"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"1": {"4": {"4": {"8": {"0": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"2": {"6": {"5": {"9": {"2": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"4": {"4": {"0": {"0": {"docs": {"sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"5": {"4": {"4": {"0": {"docs": {"sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"2": {"8": {"7": {"6": {"8": {"docs": {"sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"6": {"1": {"1": {"6": {"9": {"6": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"4": {"0": {"7": {"3": {"6": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"2": {"4": {"0": {"9": {"6": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": 
{"4": {"1": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"2": {"9": {"6": {"4": {"8": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"3": {"1": {"3": {"6": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"3": {"2": {"1": {"6": {"docs": {"sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"1": {"3": {"1": {"0": {"4": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"9": {"4": {"4": {"0": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"0": {"4": {"9": {"9": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"9": {"0": {"2": {"0": {"8": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"1": {"3": {"6": {"0": {"3": {"2": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, 
"df": 0}, "2": {"4": {"5": {"2": {"4": {"8": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"9": {"6": {"9": {"6": {"docs": {"sqlglot.dataframe.sql.Column.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"7": {"0": {"5": {"6": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "8": {"1": {"5": {"0": {"4": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"8": {"6": {"7": {"3": {"1": {"2": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"8": {"3": {"3": {"6": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"9": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 2}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 2}, "sqlglot.dataframe.sql.Column.between": {"tf": 2}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 2}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 2}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 2}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, 
"sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 35}, "docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}, "6": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}}, "df": 1}, "8": {"0": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot.parse": {"tf": 10.099504938362077}, "sqlglot.parse_one": {"tf": 12.727922061357855}, "sqlglot.transpile": {"tf": 14}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 2}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 6}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 12.806248474865697}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 14.2828568570857}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 6.4031242374328485}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 8.426149773176359}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 6.324555320336759}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 12.041594578792296}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 7.14142842854285}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 
7.3484692283495345}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 10}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 13.19090595827292}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 5.0990195135927845}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 8.12403840463596}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 8}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 9.327379053088816}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 6}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 6.782329983125268}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 9.16515138991168}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 8.602325267042627}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 6.164414002968976}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 7.0710678118654755}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 7.54983443527075}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 8.831760866327848}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 9.797958971132712}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 8.54400374531753}, "sqlglot.dataframe.sql.Column.binary_op": 
{"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 8.246211251235321}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 6.48074069840786}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.Column.copy": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.alias": {"tf": 6}, "sqlglot.dataframe.sql.Column.asc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.when": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 6}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.Column.cast": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 8}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 8}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 6}, "sqlglot.dataframe.sql.Column.like": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.Column.substr": {"tf": 9.9498743710662}, "sqlglot.dataframe.sql.Column.isin": {"tf": 7.937253933193772}, "sqlglot.dataframe.sql.Column.between": {"tf": 8.660254037844387}, "sqlglot.dataframe.sql.Column.over": {"tf": 7.211102550927978}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 10.677078252031311}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 
10.908712114635714}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 12.083045973594572}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 2}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 5.744562646538029}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 4.242640687119285}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 9}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 6.6332495807108}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 5.291502622129181}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 6}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 10}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 5.830951894845301}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 4.795831523312719}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 6.557438524302}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 7.615773105863909}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 7.745966692414834}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 
5.291502622129181}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 9.273618495495704}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 7.615773105863909}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 10.099504938362077}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 5.5677643628300215}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 5.0990195135927845}, "sqlglot.dialects.dialect.rename_func": {"tf": 6.928203230275509}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.if_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 7.416198487095663}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 6.48074069840786}, 
"sqlglot.dialects.dialect.str_position_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 8.48528137423857}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 8.660254037844387}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 8}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 4.898979485566356}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.dialect.trim_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.drill.if_sql": {"tf": 6.48074069840786}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 2}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 3.7416573867739413}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 4.47213595499958}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 2}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 2}, 
"sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 3.7416573867739413}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 3.7416573867739413}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 2}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 3.7416573867739413}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 2}, 
"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 5.291502622129181}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 5.291502622129181}, "sqlglot.diff.Insert.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Remove.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Move.__init__": {"tf": 4.47213595499958}, "sqlglot.diff.Update.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.Keep.__init__": {"tf": 6.164414002968976}, "sqlglot.diff.diff": {"tf": 10.488088481701515}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 5.656854249492381}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 10.723805294763608}, "sqlglot.errors.ParseError.__init__": {"tf": 6.48074069840786}, "sqlglot.errors.ParseError.new": {"tf": 13.038404810405298}, "sqlglot.errors.concat_messages": {"tf": 5.385164807134504}, "sqlglot.errors.merge_errors": {"tf": 6.48074069840786}, "sqlglot.executor.execute": {"tf": 12.727922061357855}, "sqlglot.executor.context.Context.__init__": {"tf": 7.416198487095663}, "sqlglot.executor.context.Context.eval": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.context.Context.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.context.Context.table_iter": {"tf": 7.681145747868608}, "sqlglot.executor.context.Context.filter": {"tf": 4}, "sqlglot.executor.context.Context.sort": {"tf": 4}, "sqlglot.executor.context.Context.set_row": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_index": {"tf": 4.47213595499958}, "sqlglot.executor.context.Context.set_range": {"tf": 5.291502622129181}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.env.filter_nulls": {"tf": 4.242640687119285}, "sqlglot.executor.env.null_if_any": {"tf": 3.4641016151377544}, "sqlglot.executor.env.str_position": {"tf": 4.69041575982343}, "sqlglot.executor.env.substring": {"tf": 5.0990195135927845}, "sqlglot.executor.env.cast": {"tf": 
3.7416573867739413}, "sqlglot.executor.env.ordered": {"tf": 4.242640687119285}, "sqlglot.executor.env.interval": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 4.47213595499958}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 3.1622776601683795}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 3.7416573867739413}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 4.795831523312719}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 4.69041575982343}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 4.242640687119285}, "sqlglot.executor.python.Python.__init__": {"tf": 2}, "sqlglot.executor.table.Table.__init__": {"tf": 4.898979485566356}, "sqlglot.executor.table.Table.add_columns": {"tf": 4.69041575982343}, "sqlglot.executor.table.Table.append": {"tf": 3.7416573867739413}, "sqlglot.executor.table.Table.pop": {"tf": 3.1622776601683795}, "sqlglot.executor.table.TableIter.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 2.8284271247461903}, "sqlglot.executor.table.RowReader.__init__": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 5.744562646538029}, 
"sqlglot.expressions.Expression.__init__": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.text": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.copy": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.append": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.set": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.find": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.find_all": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 4}, "sqlglot.expressions.Expression.walk": {"tf": 5.0990195135927845}, "sqlglot.expressions.Expression.dfs": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.bfs": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.flatten": {"tf": 4.242640687119285}, "sqlglot.expressions.Expression.sql": {"tf": 9.055385138137417}, "sqlglot.expressions.Expression.transform": {"tf": 5.830951894845301}, "sqlglot.expressions.Expression.replace": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.pop": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.assert_is": {"tf": 3.872983346207417}, "sqlglot.expressions.Expression.error_messages": {"tf": 6}, "sqlglot.expressions.Expression.dump": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.load": {"tf": 3.7416573867739413}, "sqlglot.expressions.Condition.and_": {"tf": 5.477225575051661}, "sqlglot.expressions.Condition.or_": {"tf": 5.477225575051661}, "sqlglot.expressions.Condition.not_": {"tf": 3.1622776601683795}, "sqlglot.expressions.Unionable.union": {"tf": 6}, "sqlglot.expressions.Unionable.intersect": {"tf": 6}, "sqlglot.expressions.Unionable.except_": {"tf": 6}, "sqlglot.expressions.Literal.number": {"tf": 4.898979485566356}, 
"sqlglot.expressions.Literal.string": {"tf": 4.898979485566356}, "sqlglot.expressions.Join.on": {"tf": 6.782329983125268}, "sqlglot.expressions.Join.using": {"tf": 6.782329983125268}, "sqlglot.expressions.Properties.from_dict": {"tf": 4.898979485566356}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 6}, "sqlglot.expressions.Subqueryable.limit": {"tf": 7.14142842854285}, "sqlglot.expressions.Subqueryable.with_": {"tf": 8.06225774829855}, "sqlglot.expressions.Union.limit": {"tf": 7.14142842854285}, "sqlglot.expressions.Select.from_": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.group_by": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.order_by": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.sort_by": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.cluster_by": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.limit": {"tf": 7.14142842854285}, "sqlglot.expressions.Select.offset": {"tf": 7.14142842854285}, "sqlglot.expressions.Select.select": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.lateral": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.join": {"tf": 9.797958971132712}, "sqlglot.expressions.Select.where": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.having": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.window": {"tf": 7.874007874011811}, "sqlglot.expressions.Select.distinct": {"tf": 6}, "sqlglot.expressions.Select.ctas": {"tf": 7.745966692414834}, "sqlglot.expressions.Select.lock": {"tf": 7.14142842854285}, "sqlglot.expressions.Subquery.unnest": {"tf": 3.1622776601683795}, "sqlglot.expressions.DataType.build": {"tf": 11.357816691600547}, "sqlglot.expressions.DataType.is_type": {"tf": 5.656854249492381}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.from_arg_list": {"tf": 3.7416573867739413}, "sqlglot.expressions.Func.sql_names": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func.sql_name": {"tf": 3.1622776601683795}, 
"sqlglot.expressions.Func.default_parser_mappings": {"tf": 3.1622776601683795}, "sqlglot.expressions.Cast.is_type": {"tf": 5.656854249492381}, "sqlglot.expressions.maybe_parse": {"tf": 14.177446878757825}, "sqlglot.expressions.union": {"tf": 6}, "sqlglot.expressions.intersect": {"tf": 6}, "sqlglot.expressions.except_": {"tf": 6}, "sqlglot.expressions.select": {"tf": 6}, "sqlglot.expressions.from_": {"tf": 6}, "sqlglot.expressions.update": {"tf": 7.810249675906654}, "sqlglot.expressions.delete": {"tf": 6.48074069840786}, "sqlglot.expressions.condition": {"tf": 5.830951894845301}, "sqlglot.expressions.and_": {"tf": 6}, "sqlglot.expressions.or_": {"tf": 6}, "sqlglot.expressions.not_": {"tf": 5.830951894845301}, "sqlglot.expressions.paren": {"tf": 4.47213595499958}, "sqlglot.expressions.to_identifier": {"tf": 4.242640687119285}, "sqlglot.expressions.to_interval": {"tf": 6.164414002968976}, "sqlglot.expressions.to_table": {"tf": 7.745966692414834}, "sqlglot.expressions.to_column": {"tf": 6.708203932499369}, "sqlglot.expressions.alias_": {"tf": 13.416407864998739}, "sqlglot.expressions.subquery": {"tf": 5.656854249492381}, "sqlglot.expressions.column": {"tf": 6}, "sqlglot.expressions.cast": {"tf": 9.16515138991168}, "sqlglot.expressions.table_": {"tf": 7.54983443527075}, "sqlglot.expressions.values": {"tf": 10.44030650891055}, "sqlglot.expressions.rename_table": {"tf": 7.745966692414834}, "sqlglot.expressions.convert": {"tf": 4.47213595499958}, "sqlglot.expressions.replace_children": {"tf": 3.7416573867739413}, "sqlglot.expressions.column_table_names": {"tf": 3.1622776601683795}, "sqlglot.expressions.table_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.replace_tables": {"tf": 3.7416573867739413}, "sqlglot.expressions.replace_placeholders": {"tf": 4.69041575982343}, "sqlglot.expressions.expand": {"tf": 8.306623862918075}, "sqlglot.expressions.func": {"tf": 10.04987562112089}, "sqlglot.expressions.true": {"tf": 2.6457513110645907}, "sqlglot.expressions.false": 
{"tf": 2.6457513110645907}, "sqlglot.expressions.null": {"tf": 2.6457513110645907}, "sqlglot.generator.Generator.__init__": {"tf": 15.033296378372908}, "sqlglot.generator.Generator.generate": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.unsupported": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.sep": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.seg": {"tf": 6}, "sqlglot.generator.Generator.pad_comment": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.maybe_comment": {"tf": 6}, "sqlglot.generator.Generator.wrap": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.no_identify": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.normalize_func": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.indent": {"tf": 9.327379053088816}, "sqlglot.generator.Generator.sql": {"tf": 8.94427190999916}, "sqlglot.generator.Generator.uncache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cache_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.characterset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.column_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columndef_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 4.242640687119285}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 5.477225575051661}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 4.242640687119285}, "sqlglot.generator.Generator.create_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.describe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 6}, "sqlglot.generator.Generator.with_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datatype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.directory_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.delete_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.drop_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.except_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fetch_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.filter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.hint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.index_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.identifier_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.national_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.partition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.root_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.properties": {"tf": 9.797958971132712}, "sqlglot.generator.Generator.with_properties": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.locate_properties": {"tf": 7.937253933193772}, 
"sqlglot.generator.Generator.property_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.insert_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intersect_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.introducer_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.table_sql": {"tf": 6.6332495807108}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.pivot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.tuple_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.update_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.values_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.var_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.into_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.from_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.group_sql": {"tf": 
5.291502622129181}, "sqlglot.generator.Generator.having_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.join_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lambda_sql": {"tf": 6.855654600401044}, "sqlglot.generator.Generator.lateral_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.limit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.offset_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lock_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.literal_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.null_sql": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.boolean_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.order_sql": {"tf": 6.48074069840786}, "sqlglot.generator.Generator.cluster_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distribute_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sort_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ordered_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.query_modifiers": {"tf": 6.164414002968976}, "sqlglot.generator.Generator.select_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.schema_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.star_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.parameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.subquery_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.qualify_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.union_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.union_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.unnest_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.where_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.window_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 6.557438524302}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.between_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bracket_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.all_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.any_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.exists_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.case_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.constraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.extract_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trim_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.concat_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.check_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.unique_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.if_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.interval_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.return_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.reference_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.paren_sql": {"tf": 
5.291502622129181}, "sqlglot.generator.Generator.neg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.not_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.alias_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.aliases_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.add_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.and_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.connector_sql": {"tf": 6}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.cast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.collate_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.command_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.transaction_sql": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.commit_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.rollback_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.renametable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.altertable_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distinct_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 
5.291502622129181}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.div_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.distance_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.dot_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.eq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.escape_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.glob_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.gt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.gte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.ilike_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.is_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.like_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.similarto_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lt_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.lte_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mod_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.mul_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.neq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.or_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.slice_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.sub_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.trycast_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.use_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.binary": {"tf": 6}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 5.291502622129181}, 
"sqlglot.generator.Generator.format_args": {"tf": 6.708203932499369}, "sqlglot.generator.Generator.text_width": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.format_time": {"tf": 5.744562646538029}, "sqlglot.generator.Generator.expressions": {"tf": 10.677078252031311}, "sqlglot.generator.Generator.op_expressions": {"tf": 7.3484692283495345}, "sqlglot.generator.Generator.naked_property": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.set_operation": {"tf": 6}, "sqlglot.generator.Generator.tag_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.token_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.when_sql": {"tf": 5.291502622129181}, "sqlglot.generator.Generator.merge_sql": {"tf": 5.291502622129181}, "sqlglot.helper.seq_get": {"tf": 6.164414002968976}, "sqlglot.helper.ensure_list": {"tf": 3.1622776601683795}, "sqlglot.helper.ensure_collection": {"tf": 3.1622776601683795}, "sqlglot.helper.csv": {"tf": 5.477225575051661}, "sqlglot.helper.subclasses": {"tf": 9.38083151964686}, "sqlglot.helper.apply_index_offset": {"tf": 6.6332495807108}, "sqlglot.helper.camel_to_snake_case": {"tf": 4}, "sqlglot.helper.while_changing": {"tf": 8}, "sqlglot.helper.tsort": {"tf": 6.324555320336759}, "sqlglot.helper.open_file": {"tf": 3.872983346207417}, "sqlglot.helper.csv_reader": {"tf": 4.898979485566356}, "sqlglot.helper.find_new_name": {"tf": 5.385164807134504}, "sqlglot.helper.object_to_dict": {"tf": 4.69041575982343}, "sqlglot.helper.split_num_words": {"tf": 7.615773105863909}, "sqlglot.helper.is_iterable": {"tf": 4}, "sqlglot.helper.flatten": {"tf": 6.082762530298219}, "sqlglot.helper.count_params": {"tf": 4}, "sqlglot.helper.dict_depth": 
{"tf": 4}, "sqlglot.helper.first": {"tf": 5}, "sqlglot.lineage.Node.__init__": {"tf": 9}, "sqlglot.lineage.Node.walk": {"tf": 5}, "sqlglot.lineage.Node.to_html": {"tf": 5.0990195135927845}, "sqlglot.lineage.lineage": {"tf": 16.522711641858304}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 10.44030650891055}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 5.830951894845301}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 5.291502622129181}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 3.7416573867739413}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 5.744562646538029}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 5.656854249492381}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 5.744562646538029}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 5.744562646538029}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 3.1622776601683795}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 4.242640687119285}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 3.1622776601683795}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 4.242640687119285}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalize": {"tf": 5.0990195135927845}, "sqlglot.optimizer.normalize.normalized": {"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.normalization_distance": 
{"tf": 4.242640687119285}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 4.242640687119285}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 3.1622776601683795}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 3.7416573867739413}, "sqlglot.optimizer.optimizer.optimize": {"tf": 19}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.1622776601683795}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 4.242640687119285}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 3.7416573867739413}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 3.1622776601683795}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 3.7416573867739413}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 3.1622776601683795}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 5.830951894845301}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 7.483314773547883}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.find": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.rename_source": 
{"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 3.7416573867739413}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.build_scope": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.simplify": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 5.744562646538029}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.flatten": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.always_true": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.is_complement": {"tf": 3.7416573867739413}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 4.242640687119285}, "sqlglot.optimizer.simplify.extract_date": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.date_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.1622776601683795}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 4.242640687119285}, 
"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 4.69041575982343}, "sqlglot.parser.parse_var_map": {"tf": 3.1622776601683795}, "sqlglot.parser.Parser.__init__": {"tf": 11.269427669584644}, "sqlglot.parser.Parser.reset": {"tf": 3.1622776601683795}, "sqlglot.parser.Parser.parse": {"tf": 8.426149773176359}, "sqlglot.parser.Parser.parse_into": {"tf": 11.313708498984761}, "sqlglot.parser.Parser.check_errors": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser.raise_error": {"tf": 6.855654600401044}, "sqlglot.parser.Parser.expression": {"tf": 8.54400374531753}, "sqlglot.parser.Parser.validate_expression": {"tf": 7.0710678118654755}, "sqlglot.planner.Plan.__init__": {"tf": 4.47213595499958}, "sqlglot.planner.Step.__init__": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Step.add_dependency": {"tf": 5.291502622129181}, "sqlglot.planner.Step.to_s": {"tf": 5.0990195135927845}, "sqlglot.planner.Scan.__init__": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 8.602325267042627}, "sqlglot.planner.Join.__init__": {"tf": 2}, "sqlglot.planner.Join.from_joins": {"tf": 8.888194417315589}, "sqlglot.planner.Aggregate.__init__": {"tf": 2}, "sqlglot.planner.Sort.__init__": {"tf": 2}, "sqlglot.planner.SetOperation.__init__": {"tf": 8.306623862918075}, "sqlglot.planner.SetOperation.from_expression": {"tf": 8.602325267042627}, "sqlglot.schema.Schema.add_table": {"tf": 9.539392014169456}, "sqlglot.schema.Schema.column_names": {"tf": 7.416198487095663}, "sqlglot.schema.Schema.get_column_type": {"tf": 7.745966692414834}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 4.795831523312719}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 5.744562646538029}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 8.48528137423857}, "sqlglot.schema.MappingSchema.__init__": {"tf": 10.295630140987}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 6.164414002968976}, "sqlglot.schema.MappingSchema.copy": 
{"tf": 5.0990195135927845}, "sqlglot.schema.MappingSchema.add_table": {"tf": 9.539392014169456}, "sqlglot.schema.MappingSchema.column_names": {"tf": 7.416198487095663}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 8.06225774829855}, "sqlglot.schema.ensure_schema": {"tf": 4.898979485566356}, "sqlglot.schema.ensure_column_mapping": {"tf": 7.483314773547883}, "sqlglot.schema.flatten_schema": {"tf": 7.54983443527075}, "sqlglot.serde.dump": {"tf": 11.045361017187261}, "sqlglot.serde.load": {"tf": 11.045361017187261}, "sqlglot.time.format_time": {"tf": 7.810249675906654}, "sqlglot.tokens.Token.__init__": {"tf": 9}, "sqlglot.tokens.Token.number": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.string": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.identifier": {"tf": 5.291502622129181}, "sqlglot.tokens.Token.var": {"tf": 5.291502622129181}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 2}, "sqlglot.tokens.Tokenizer.reset": {"tf": 3.4641016151377544}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 5.744562646538029}, "sqlglot.transforms.unalias_group": {"tf": 5.744562646538029}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 5.744562646538029}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 5.744562646538029}, "sqlglot.transforms.preprocess": {"tf": 10.816653826391969}, "sqlglot.transforms.delegate": {"tf": 4}, "sqlglot.trie.new_trie": {"tf": 4.898979485566356}, "sqlglot.trie.in_trie": {"tf": 6.244997998398398}}, "df": 712, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, 
"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 108, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1.7320508075688772}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, 
"sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.count": 
{"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 
1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 
1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2.6457513110645907}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 2.23606797749979}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 
1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, 
"sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, 
"sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, 
"sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": 
{"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, 
"sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, 
"sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.ensure_schema": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2.449489742783178}}, "df": 450}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.SparkSession.sql": 
{"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}}, "df": 2}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 2}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 2.449489742783178}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.seg": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.no_identify": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.normalize_func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": 
{"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 2}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.table_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, 
"sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, 
"sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, 
"sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 2}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1}, 
"sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 2}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 371, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}}, "df": 2}}}}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}}, "df": 1}}}}}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 3}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}}, "df": 4}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.star_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, 
"sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 9, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 12}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.context.Context.eval": 
{"tf": 1}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}, "sqlglot.executor.table.Table.pop": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": 
{"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, 
"sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, 
"sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.null_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, 
"sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 
1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, 
"sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, 
"sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.reset": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 471}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, 
"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.4142135623730951}}, "df": 24, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, 
"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 15}}}}}}, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "p": {"docs": {"sqlglot.generator.Generator.sep": {"tf": 1}, "sqlglot.generator.Generator.seg": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 8}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 
1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 15}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}}, "df": 1}}}}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 
1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2}}}}}}}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.sub_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 7}}, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}}, "df": 1}}}}}, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.sort_sql": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.generator.Generator.similarto_sql": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.slice_sql": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"d": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 5, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.Column.rlike": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.return_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 4, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.reference_sql": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.table.Table.append": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.generator.Generator.rollback_sql": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}}, "df": 2}}}, "w": {"docs": {"sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 4}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 
1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, 
"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 60}}, "t": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 2}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.unique_sql": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 7}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.uncache_sql": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.join": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.generator.Generator.use_sql": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}, "d": {"docs": {"sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2}, "sqlglot.transpile": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 2.6457513110645907}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.expressions.Expression.sql": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 2.23606797749979}, 
"sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 2.23606797749979}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.__init__": {"tf": 2.23606797749979}}, "df": 51, "s": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}}, "df": 13}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, 
"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 35}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, 
"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 9}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distribute_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 2.23606797749979}}, "df": 2}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.directory_sql": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {"sqlglot.generator.Generator.div_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.DataFrame.where": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 89, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, 
"sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 12}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {"sqlglot.optimizer.simplify.date_literal": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "g": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}}, "df": 2}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.planner.Step.add_dependency": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}}, "df": 3}}}}, "b": {"docs": {"sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 3}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.drop_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.droppartition_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.dpipe_sql": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.dot_sql": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}}, "df": 4}}}, "t": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.helper.first": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 5, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, 
"sqlglot.expressions.DataType.build": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 31, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 11}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, 
"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, 
"sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 48}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, 
"sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 26, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 10, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, 
"sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 4}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.tablealias_sql": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.Update.__init__": {"tf": 1}, "sqlglot.diff.Keep.__init__": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 4}}}}, "g": {"docs": {"sqlglot.generator.Generator.tag_sql": {"tf": 1}}, "df": 1}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 11}}}}, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor.env.str_position": 
{"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.executor.env.interval": {"tf": 1}}, "df": 5}}}, "o": {"docs": {"sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.executor.env.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 7, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 10}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.token_sql": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 
1.4142135623730951}}, "df": 1, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, 
"sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.executor.context.Context.set_row": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.executor.env.str_position": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, 
"sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 3.1622776601683795}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 
1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1}}, "df": 120, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}}, "df": 33}}}}}}, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}}, "df": 2, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.notnullcolumnconstraint_sql": 
{"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}}, "df": 8}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.normalize_func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 22}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.national_sql": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 4}, "g": {"docs": {"sqlglot.generator.Generator.neg_sql": {"tf": 1}}, "df": 1}, "q": {"docs": {"sqlglot.generator.Generator.neq_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}}, "df": 2}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {"sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"q": {"docs": {"sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, 
"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 51}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": 
{"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.table.ensure_tables": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1.4142135623730951}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 52}}}}, "m": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}}, "df": 2}}}}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.union": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.like": {"tf": 1}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1}}, "df": 11}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, 
"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 4}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {"sqlglot.executor.env.reverse_key.__init__": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "r": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.ordered_sql": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 3}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 4}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 
1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 53}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}, "sqlglot.lineage.Node.to_html": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 5, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.lineage.Node.to_html": {"tf": 1}}, "df": 1}}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}}, "df": 4}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.like_sql": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.limit_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.planner.Step.to_s": {"tf": 1}}, "df": 5}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.Column.substr": {"tf": 1}, "sqlglot.executor.env.substring": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, 
"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 26, "e": {"docs": {"sqlglot.generator.Generator.lte_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.lateral_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Column.between": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.locate_properties": {"tf": 1}}, "df": 1}}}}}, "k": {"docs": {"sqlglot.generator.Generator.lock_sql": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.loaddata_sql": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}}, "df": 5, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 3, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 
1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 2}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 
1}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1}, "sqlglot.diff.Insert.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, 
"sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.wrap": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, 
"sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": 
{"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, 
"sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, 
"sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, "sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, 
"sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1.4142135623730951}, 
"sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, 
"sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1}, "sqlglot.optimizer.simplify.always_true": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Plan.__init__": {"tf": 1.4142135623730951}, 
"sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 358, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}, "sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}, 
"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 
1}, "sqlglot.diff.Insert.__init__": {"tf": 1}, "sqlglot.diff.Remove.__init__": {"tf": 1}, "sqlglot.diff.Move.__init__": {"tf": 1}, "sqlglot.diff.Update.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.Keep.__init__": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.window": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.DataType.build": 
{"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.paren": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.rename_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1}, "sqlglot.generator.Generator.wrap": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1}, "sqlglot.generator.Generator.cache_sql": {"tf": 1}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}, 
"sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}, "sqlglot.generator.Generator.describe_sql": {"tf": 1}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1}, "sqlglot.generator.Generator.hexstring_sql": {"tf": 1}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1}, "sqlglot.generator.Generator.directory_sql": {"tf": 1}, "sqlglot.generator.Generator.delete_sql": {"tf": 1}, "sqlglot.generator.Generator.drop_sql": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1}, "sqlglot.generator.Generator.filter_sql": {"tf": 1}, "sqlglot.generator.Generator.hint_sql": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.generator.Generator.national_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_sql": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.with_properties": {"tf": 1}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}, 
"sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1}, "sqlglot.generator.Generator.update_sql": {"tf": 1}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}, "sqlglot.generator.Generator.having_sql": {"tf": 1}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1}, "sqlglot.generator.Generator.limit_sql": {"tf": 1}, "sqlglot.generator.Generator.offset_sql": {"tf": 1}, "sqlglot.generator.Generator.lock_sql": {"tf": 1}, "sqlglot.generator.Generator.literal_sql": {"tf": 1}, "sqlglot.generator.Generator.loaddata_sql": {"tf": 1}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, 
"sqlglot.generator.Generator.cluster_sql": {"tf": 1}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1}, "sqlglot.generator.Generator.sort_sql": {"tf": 1}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1}, "sqlglot.generator.Generator.select_sql": {"tf": 1}, "sqlglot.generator.Generator.schema_sql": {"tf": 1}, "sqlglot.generator.Generator.star_sql": {"tf": 1}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.generator.Generator.union_sql": {"tf": 1}, "sqlglot.generator.Generator.union_op": {"tf": 1}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1}, "sqlglot.generator.Generator.between_sql": {"tf": 1}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1}, "sqlglot.generator.Generator.all_sql": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.generator.Generator.exists_sql": {"tf": 1}, "sqlglot.generator.Generator.case_sql": {"tf": 1}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1}, "sqlglot.generator.Generator.extract_sql": {"tf": 1}, "sqlglot.generator.Generator.trim_sql": {"tf": 1}, "sqlglot.generator.Generator.concat_sql": {"tf": 1}, "sqlglot.generator.Generator.check_sql": {"tf": 1}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}, 
"sqlglot.generator.Generator.unique_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}, "sqlglot.generator.Generator.in_sql": {"tf": 1}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.generator.Generator.return_sql": {"tf": 1}, "sqlglot.generator.Generator.reference_sql": {"tf": 1}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}, "sqlglot.generator.Generator.neg_sql": {"tf": 1}, "sqlglot.generator.Generator.not_sql": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1}, "sqlglot.generator.Generator.add_sql": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}, "sqlglot.generator.Generator.connector_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}, "sqlglot.generator.Generator.collate_sql": {"tf": 1}, "sqlglot.generator.Generator.command_sql": {"tf": 1}, "sqlglot.generator.Generator.commit_sql": {"tf": 1}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}, 
"sqlglot.generator.Generator.respectnulls_sql": {"tf": 1}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1}, "sqlglot.generator.Generator.div_sql": {"tf": 1}, "sqlglot.generator.Generator.distance_sql": {"tf": 1}, "sqlglot.generator.Generator.dot_sql": {"tf": 1}, "sqlglot.generator.Generator.eq_sql": {"tf": 1}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}, "sqlglot.generator.Generator.glob_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.generator.Generator.gte_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.is_sql": {"tf": 1}, "sqlglot.generator.Generator.like_sql": {"tf": 1}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1}, "sqlglot.generator.Generator.lt_sql": {"tf": 1}, "sqlglot.generator.Generator.lte_sql": {"tf": 1}, "sqlglot.generator.Generator.mod_sql": {"tf": 1}, "sqlglot.generator.Generator.mul_sql": {"tf": 1}, "sqlglot.generator.Generator.neq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1}, "sqlglot.generator.Generator.or_sql": {"tf": 1}, "sqlglot.generator.Generator.slice_sql": {"tf": 1}, "sqlglot.generator.Generator.sub_sql": {"tf": 1}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1}, "sqlglot.generator.Generator.use_sql": {"tf": 1}, "sqlglot.generator.Generator.binary": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.format_time": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}, "sqlglot.generator.Generator.set_operation": {"tf": 1}, "sqlglot.generator.Generator.tag_sql": {"tf": 1}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1}, 
"sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1}, "sqlglot.generator.Generator.when_sql": {"tf": 1}, "sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Plan.__init__": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 
1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 345}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.generator.Generator.exists_sql": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1}, "sqlglot.generator.Generator.except_sql": {"tf": 1}, "sqlglot.generator.Generator.except_op": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.table.ensure_tables": {"tf": 1}}, "df": 4}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 
0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.extract_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}}, "df": 6}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 3}}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 7}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1}}, "df": 2}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.env.filter_nulls": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.escape_sql": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {"sqlglot.generator.Generator.eq_sql": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.in_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.4142135623730951}, 
"sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.4142135623730951}, "sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.executor.context.Context.set_range": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.to_s": {"tf": 1}, "sqlglot.schema.flatten_schema": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.__init__": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 33, "o": {"docs": {"sqlglot.parse_one": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.into_sql": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1}, "sqlglot.generator.Generator.intersect_op": {"tf": 1}}, "df": 3}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.interval_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1}}, "df": 3}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 
0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.introducer_sql": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.generator.Generator.intdiv_sql": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}, "sqlglot.generator.Generator.insert_sql": {"tf": 1}}, "df": 3}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor.context.Context.set_index": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.index_sql": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 5}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 
1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 7}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.context.Context.table_iter": {"tf": 1}, "sqlglot.helper.flatten": 
{"tf": 1}, "sqlglot.lineage.Node.walk": {"tf": 1}}, "df": 3}}}}}}}, "f": {"docs": {"sqlglot.dialects.dialect.if_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.generator.Generator.if_sql": {"tf": 1}}, "df": 3}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.generator.Generator.is_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML.__init__": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 
0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 4, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse_one": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.collate_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1}, 
"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}}, "df": 18}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.when": {"tf": 2}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 2}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.substr": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.column_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 48, "s": {"docs": {"sqlglot.executor.context.Context.add_columns": {"tf": 1}, "sqlglot.executor.table.Table.__init__": {"tf": 1}, "sqlglot.executor.table.Table.add_columns": {"tf": 1}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 8}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.generator.Generator.columndef_sql": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 10}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.executor.context.Context.filter": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 
1}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1}}, "df": 5}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.constraint_sql": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.concat_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.connector_sql": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.window": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.expand": 
{"tf": 1}}, "df": 25}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.context.Context.eval": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor.context.Context.eval_tuple": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.command_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.pad_comment": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.tokens.Token.__init__": {"tf": 1}}, "df": 3}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.commit_sql": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}}, "df": 4}}}}, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Literal.number": {"tf": 1}, "sqlglot.expressions.Literal.string": {"tf": 1}, "sqlglot.expressions.Properties.from_dict": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.Func.sql_names": {"tf": 1}, "sqlglot.expressions.Func.sql_name": {"tf": 1}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 30}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 
1}}, "df": 4, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.cluster_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dialects.dialect.rename_func": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 13}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.generator.Generator.cast_sql": {"tf": 1}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1}}, "df": 4}, "e": {"docs": {"sqlglot.generator.Generator.case_sql": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.table_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.cache_sql": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1}, "sqlglot.generator.Generator.cte_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1}}, "df": 2}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.generator.Generator.create_sql": {"tf": 1}}, "df": 3}}}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.characterset_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": 
{"sqlglot.generator.Generator.check_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.properties": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, 
"sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.window_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 12, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1}}, "df": 9}}}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1}, "sqlglot.generator.Generator.with_sql": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.generator.Generator.withingroup_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator.where_sql": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot.generator.Generator.when_sql": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 2, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.DataType.is_type": {"tf": 1}, "sqlglot.expressions.Cast.is_type": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, 
"df": 31, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.boolean_sql": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bracket_sql": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}}, "df": 2}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.bitstring_sql": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "x": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.binary": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.between_sql": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}}, "df": 19}}}}}}}, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.dialect.parse_date_delta": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.ensure_column_mapping": 
{"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 9, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 2}}}}}}}}}}}, "x": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.errors.concat_messages": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.generator.Generator.unsupported": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.merge_sql": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "b": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.mod_sql": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 2}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.mul_sql": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema.find": {"tf": 1}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.__init__": 
{"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.over": {"tf": 1}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1}, "sqlglot.generator.Generator.gt_sql": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.lineage.Node.__init__": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 4.242640687119285}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}}, "df": 27, "e": {"docs": {"sqlglot.generator.Generator.gte_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1}, "sqlglot.generator.Generator.group_sql": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}}, "df": 1}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 25}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 
0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1}}, "df": 2}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.generator.Generator.glob_sql": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller.__init__": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1.4142135623730951}}, "df": 7}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 2.23606797749979}, "sqlglot.generator.Generator.indent": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.order_sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.generator.Generator.op_expressions": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalized": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.__init__": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 20}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.lineage.Node.__init__": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2, "c": {"docs": 
{"sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1}, "sqlglot.executor.env.filter_nulls": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.count_params": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 4.242640687119285}}, "df": 3, "s": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.foreignkey_sql": {"tf": 1}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1}}, "df": 2}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 2}}}}}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.env.ordered": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.filter_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}, "l": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 
1}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.from_sql": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.generator.Generator.fetch_sql": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1}}, "df": 1}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1}, "sqlglot.dataframe.sql.Column.endswith": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, 
"sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 17, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.values_sql": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.generator.Generator.var_sql": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dialects.dialect.var_map_sql": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.__init__": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 3}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.parameter_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": 
{}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.partition_sql": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.paren": {"tf": 1}, "sqlglot.generator.Generator.paren_sql": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1}}, "df": 4}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}}, "df": 2}}, "d": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.indent": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.no_properties_sql": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator.properties_sql": {"tf": 1}, "sqlglot.generator.Generator.root_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}}, "df": 11}}}, "y": {"docs": {"sqlglot.generator.Generator.locate_properties": {"tf": 1}, "sqlglot.generator.Generator.property_sql": {"tf": 1}, "sqlglot.generator.Generator.naked_property": {"tf": 1}}, "df": 3}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator.properties": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}}, "df": 3}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.str_position": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.execute": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.add_dependency": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 
1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.generator.Generator.placeholder_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.pseudotype_sql": {"tf": 1}}, "df": 1}}}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.hint_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ParseError.new": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.hexstring_sql": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.having_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}}}}}}}, "k": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.kwarg_sql": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.DataType.build": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.copy": {"tf": 1}}, "df": 30}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1}}, "df": 3}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1}}, "df": 2}}, "y": {"docs": {"sqlglot.executor.context.Context.sort": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.generator.Generator.sql": {"tf": 1}, "sqlglot.generator.Generator.expressions": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.schema.flatten_schema": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {"sqlglot.optimizer.simplify.is_complement": {"tf": 1}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.generator.Generator.table_sql": {"tf": 1}}, "df": 2, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 
1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1}, "sqlglot.dataframe.sql.Column.when": {"tf": 1}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1}, "sqlglot.errors.ParseError.__init__": {"tf": 1}, "sqlglot.errors.concat_messages": {"tf": 1}, "sqlglot.errors.merge_errors": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.any_sql": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1}, "sqlglot.schema.ensure_schema": {"tf": 1}}, "df": 19}, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.generator.Generator.and_sql": {"tf": 1}}, "df": 3}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.anonymous_sql": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.generator.Generator.all_sql": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, 
"df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.generator.Generator.__init__": {"tf": 1}, "sqlglot.generator.Generator.alias_sql": {"tf": 1}, "sqlglot.parser.Parser.__init__": {"tf": 1}}, "df": 10, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.generator.Generator.aliases_sql": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.generator.Generator.altercolumn_sql": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1}, "sqlglot.expressions.Expression.__init__": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, 
"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator.no_identify": {"tf": 1}, "sqlglot.generator.Generator.format_args": {"tf": 1}, "sqlglot.generator.Generator.text_width": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.parse_var_map": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 16}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.generator.Generator.lambda_sql": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, 
"sqlglot.expressions.Select.window": {"tf": 1}}, "df": 14}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 4.242640687119285}}, "df": 2, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.attimezone_sql": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.generator.Generator.add_sql": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.addconstraint_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": 
{}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.join_sql": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 7, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.generator.Generator.joinhint_sql": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.planner.Join.from_joins": {"tf": 1}}, "df": 2}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.journalproperty_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "q": 
{"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.generator.Generator.__init__": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}}, "df": 4}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.generator.Generator.qualify_sql": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}}, "df": 3}}}}}}}, "x": {"2": {"7": {"docs": {"sqlglot.helper.open_file": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "bases": {"root": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": 
{"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": 
{"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.executor.python.Python": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 79, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 6}}}}}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dialects.databricks.Databricks": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.planner.Scan": {"tf": 1}, "sqlglot.planner.Join": {"tf": 1}, "sqlglot.planner.Aggregate": {"tf": 1}, "sqlglot.planner.Sort": {"tf": 1}, "sqlglot.planner.SetOperation": {"tf": 1}}, "df": 5}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union": {"tf": 1}, "sqlglot.expressions.Select": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.All": {"tf": 1}, "sqlglot.expressions.Any": {"tf": 1}, "sqlglot.expressions.Exists": {"tf": 1}}, "df": 3}}}}}}}}}}}}}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python": {"tf": 1.4142135623730951}}, "df": 15, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1}, "sqlglot.dialects.databricks.Databricks": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1}, "sqlglot.dialects.hive.Hive": {"tf": 1}, "sqlglot.dialects.mysql.MySQL": {"tf": 1}, "sqlglot.dialects.oracle.Oracle": {"tf": 1}, "sqlglot.dialects.postgres.Postgres": {"tf": 1}, "sqlglot.dialects.presto.Presto": {"tf": 1}, "sqlglot.dialects.redshift.Redshift": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1}, "sqlglot.dialects.spark.Spark": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau": {"tf": 1}, "sqlglot.dialects.teradata.Teradata": {"tf": 1}, "sqlglot.dialects.trino.Trino": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL": {"tf": 
1}, "sqlglot.executor.python.Python": {"tf": 1}}, "df": 31}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.CTE": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 13}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1}}, "df": 16}}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Interval": {"tf": 1}, "sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}}, "df": 17}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 
0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}}, "df": 17}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dialects.trino.Trino": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.4142135623730951}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.SubqueryPredicate": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Between": {"tf": 1}, "sqlglot.expressions.In": {"tf": 1}}, "df": 16}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.AlgorithmProperty": {"tf": 1}, "sqlglot.expressions.DefinerProperty": {"tf": 1}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1}, "sqlglot.expressions.TableFormatProperty": {"tf": 1}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1}, "sqlglot.expressions.FileFormatProperty": {"tf": 1}, "sqlglot.expressions.DistKeyProperty": {"tf": 1}, "sqlglot.expressions.SortKeyProperty": {"tf": 1}, "sqlglot.expressions.DistStyleProperty": {"tf": 1}, "sqlglot.expressions.LikeProperty": {"tf": 1}, "sqlglot.expressions.LocationProperty": {"tf": 1}, "sqlglot.expressions.EngineProperty": {"tf": 1}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1}, "sqlglot.expressions.CollateProperty": {"tf": 1}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1}, "sqlglot.expressions.ReturnsProperty": {"tf": 1}, "sqlglot.expressions.LanguageProperty": {"tf": 
1}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1}, "sqlglot.expressions.VolatilityProperty": {"tf": 1}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1}, "sqlglot.expressions.SerdeProperties": {"tf": 1}, "sqlglot.expressions.FallbackProperty": {"tf": 1}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1}, "sqlglot.expressions.LogProperty": {"tf": 1}, "sqlglot.expressions.JournalProperty": {"tf": 1}, "sqlglot.expressions.AfterJournalProperty": {"tf": 1}, "sqlglot.expressions.ChecksumProperty": {"tf": 1}, "sqlglot.expressions.FreespaceProperty": {"tf": 1}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1}}, "df": 34}}}}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}}, "df": 20}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "~": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 2}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Add": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.BitwiseAnd": {"tf": 1}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1}, "sqlglot.expressions.BitwiseOr": {"tf": 1}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1}, "sqlglot.expressions.BitwiseXor": {"tf": 1}, "sqlglot.expressions.Div": {"tf": 1}, "sqlglot.expressions.Dot": {"tf": 1}, "sqlglot.expressions.DPipe": {"tf": 1}, "sqlglot.expressions.EQ": {"tf": 1}, "sqlglot.expressions.NullSafeEQ": {"tf": 1}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1}, "sqlglot.expressions.Distance": {"tf": 1}, "sqlglot.expressions.Escape": {"tf": 1}, "sqlglot.expressions.Glob": {"tf": 1}, "sqlglot.expressions.GT": {"tf": 1}, "sqlglot.expressions.GTE": {"tf": 1}, "sqlglot.expressions.ILike": {"tf": 1}, 
"sqlglot.expressions.IntDiv": {"tf": 1}, "sqlglot.expressions.Is": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Like": {"tf": 1}, "sqlglot.expressions.LT": {"tf": 1}, "sqlglot.expressions.LTE": {"tf": 1}, "sqlglot.expressions.Mod": {"tf": 1}, "sqlglot.expressions.Mul": {"tf": 1}, "sqlglot.expressions.NEQ": {"tf": 1}, "sqlglot.expressions.SimilarTo": {"tf": 1}, "sqlglot.expressions.Slice": {"tf": 1}, "sqlglot.expressions.Sub": {"tf": 1}, "sqlglot.expressions.Collate": {"tf": 1}, "sqlglot.expressions.JSONBContains": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}}, "df": 35}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.4142135623730951}}, "df": 3}}}, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Condition": {"tf": 1}, "sqlglot.expressions.DerivedTable": {"tf": 1}, "sqlglot.expressions.Unionable": {"tf": 1}, "sqlglot.expressions.Cache": {"tf": 1}, "sqlglot.expressions.Uncache": {"tf": 1}, "sqlglot.expressions.Create": {"tf": 1}, "sqlglot.expressions.Describe": {"tf": 1}, "sqlglot.expressions.Set": {"tf": 1}, "sqlglot.expressions.SetItem": 
{"tf": 1}, "sqlglot.expressions.Show": {"tf": 1}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1}, "sqlglot.expressions.CharacterSet": {"tf": 1}, "sqlglot.expressions.With": {"tf": 1}, "sqlglot.expressions.WithinGroup": {"tf": 1}, "sqlglot.expressions.TableAlias": {"tf": 1}, "sqlglot.expressions.ColumnDef": {"tf": 1}, "sqlglot.expressions.AlterColumn": {"tf": 1}, "sqlglot.expressions.RenameTable": {"tf": 1}, "sqlglot.expressions.ColumnConstraint": {"tf": 1}, "sqlglot.expressions.ColumnConstraintKind": {"tf": 1}, "sqlglot.expressions.Constraint": {"tf": 1}, "sqlglot.expressions.Delete": {"tf": 1}, "sqlglot.expressions.Drop": {"tf": 1}, "sqlglot.expressions.Filter": {"tf": 1}, "sqlglot.expressions.Check": {"tf": 1}, "sqlglot.expressions.Directory": {"tf": 1}, "sqlglot.expressions.ForeignKey": {"tf": 1}, "sqlglot.expressions.PrimaryKey": {"tf": 1}, "sqlglot.expressions.Unique": {"tf": 1}, "sqlglot.expressions.Into": {"tf": 1}, "sqlglot.expressions.From": {"tf": 1}, "sqlglot.expressions.Having": {"tf": 1}, "sqlglot.expressions.Hint": {"tf": 1}, "sqlglot.expressions.JoinHint": {"tf": 1}, "sqlglot.expressions.Identifier": {"tf": 1}, "sqlglot.expressions.Index": {"tf": 1}, "sqlglot.expressions.Insert": {"tf": 1}, "sqlglot.expressions.Introducer": {"tf": 1}, "sqlglot.expressions.National": {"tf": 1}, "sqlglot.expressions.LoadData": {"tf": 1}, "sqlglot.expressions.Partition": {"tf": 1}, "sqlglot.expressions.Fetch": {"tf": 1}, "sqlglot.expressions.Group": {"tf": 1}, "sqlglot.expressions.Lambda": {"tf": 1}, "sqlglot.expressions.Limit": {"tf": 1}, "sqlglot.expressions.Join": {"tf": 1}, "sqlglot.expressions.MatchRecognize": {"tf": 1}, "sqlglot.expressions.Final": {"tf": 1}, "sqlglot.expressions.Offset": {"tf": 1}, "sqlglot.expressions.Order": {"tf": 1}, "sqlglot.expressions.Ordered": {"tf": 1}, "sqlglot.expressions.Property": {"tf": 1}, "sqlglot.expressions.Properties": {"tf": 1}, "sqlglot.expressions.Qualify": 
{"tf": 1}, "sqlglot.expressions.Return": {"tf": 1}, "sqlglot.expressions.Reference": {"tf": 1}, "sqlglot.expressions.Tuple": {"tf": 1}, "sqlglot.expressions.Table": {"tf": 1}, "sqlglot.expressions.SystemTime": {"tf": 1}, "sqlglot.expressions.Update": {"tf": 1}, "sqlglot.expressions.Var": {"tf": 1}, "sqlglot.expressions.Schema": {"tf": 1}, "sqlglot.expressions.Lock": {"tf": 1}, "sqlglot.expressions.TableSample": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Pivot": {"tf": 1}, "sqlglot.expressions.Window": {"tf": 1}, "sqlglot.expressions.WindowSpec": {"tf": 1}, "sqlglot.expressions.Where": {"tf": 1}, "sqlglot.expressions.Star": {"tf": 1}, "sqlglot.expressions.Parameter": {"tf": 1}, "sqlglot.expressions.SessionParameter": {"tf": 1}, "sqlglot.expressions.Placeholder": {"tf": 1}, "sqlglot.expressions.DataType": {"tf": 1}, "sqlglot.expressions.PseudoType": {"tf": 1}, "sqlglot.expressions.StructKwarg": {"tf": 1}, "sqlglot.expressions.Command": {"tf": 1}, "sqlglot.expressions.Transaction": {"tf": 1}, "sqlglot.expressions.Commit": {"tf": 1}, "sqlglot.expressions.Rollback": {"tf": 1}, "sqlglot.expressions.AlterTable": {"tf": 1}, "sqlglot.expressions.AddConstraint": {"tf": 1}, "sqlglot.expressions.DropPartition": {"tf": 1}, "sqlglot.expressions.Binary": {"tf": 1}, "sqlglot.expressions.Unary": {"tf": 1}, "sqlglot.expressions.Alias": {"tf": 1}, "sqlglot.expressions.Aliases": {"tf": 1}, "sqlglot.expressions.AtTimeZone": {"tf": 1}, "sqlglot.expressions.Distinct": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.IgnoreNulls": {"tf": 1}, "sqlglot.expressions.RespectNulls": {"tf": 1}, "sqlglot.expressions.Use": {"tf": 1}, "sqlglot.expressions.Merge": {"tf": 1}}, "df": 95}}}}}}}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.spark.Spark": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}, "m": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.starrocks.StarRocks": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 4}}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {}, "df": 0, "y": 
{"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}}}}}}}}}, "c": {"docs": {"sqlglot.schema.Schema": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.ApproxDistinct": {"tf": 1}, "sqlglot.expressions.ArrayAgg": {"tf": 1}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1}, "sqlglot.expressions.Avg": {"tf": 1}, "sqlglot.expressions.AnyValue": {"tf": 1}, "sqlglot.expressions.Count": {"tf": 1}, "sqlglot.expressions.LogicalOr": {"tf": 1}, "sqlglot.expressions.Max": {"tf": 1}, "sqlglot.expressions.Min": {"tf": 1}, "sqlglot.expressions.PercentileCont": {"tf": 1}, "sqlglot.expressions.PercentileDisc": {"tf": 1}, "sqlglot.expressions.Quantile": {"tf": 1}, "sqlglot.expressions.Quantiles": {"tf": 1}, "sqlglot.expressions.QuantileIf": {"tf": 1}, "sqlglot.expressions.SetAgg": {"tf": 1}, "sqlglot.expressions.Sum": {"tf": 1}, "sqlglot.expressions.Stddev": {"tf": 1}, "sqlglot.expressions.StddevPop": {"tf": 1}, "sqlglot.expressions.StddevSamp": {"tf": 1}, "sqlglot.expressions.Variance": {"tf": 1}, "sqlglot.expressions.VariancePop": {"tf": 1}}, "df": 21}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.BitString": {"tf": 1}, "sqlglot.expressions.HexString": {"tf": 1}, "sqlglot.expressions.ByteString": {"tf": 1}, "sqlglot.expressions.Column": {"tf": 1}, "sqlglot.expressions.Literal": {"tf": 1}, "sqlglot.expressions.Null": {"tf": 1}, "sqlglot.expressions.Boolean": {"tf": 1}, "sqlglot.expressions.Connector": {"tf": 1}, "sqlglot.expressions.Not": 
{"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Bracket": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 13}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.And": {"tf": 1}, "sqlglot.expressions.Or": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.ConcatWs": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1}}, "df": 10}}}}}}}}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.TryCast": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Except": {"tf": 1}, "sqlglot.expressions.Intersect": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "b": 
{"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.UDTF": {"tf": 1}, "sqlglot.expressions.Subqueryable": {"tf": 1}, "sqlglot.expressions.Subquery": {"tf": 1}}, "df": 3}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.BitwiseNot": {"tf": 1}, "sqlglot.expressions.Not": {"tf": 1}, "sqlglot.expressions.Paren": {"tf": 1}, "sqlglot.expressions.Neg": {"tf": 1}}, "df": 4}}}}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.expressions.Lateral": {"tf": 1}, "sqlglot.expressions.Unnest": {"tf": 1}, "sqlglot.expressions.Values": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Cluster": {"tf": 1}, "sqlglot.expressions.Distribute": {"tf": 1}, "sqlglot.expressions.Sort": {"tf": 1}}, "df": 3}}}}}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.AggFunc": {"tf": 1}, "sqlglot.expressions.Abs": {"tf": 1}, "sqlglot.expressions.Anonymous": {"tf": 1}, "sqlglot.expressions.Array": {"tf": 1}, "sqlglot.expressions.GenerateSeries": {"tf": 1}, "sqlglot.expressions.ArrayAll": {"tf": 1}, "sqlglot.expressions.ArrayAny": {"tf": 1}, "sqlglot.expressions.ArrayConcat": {"tf": 1}, "sqlglot.expressions.ArrayContains": {"tf": 1}, "sqlglot.expressions.ArrayFilter": {"tf": 1}, "sqlglot.expressions.ArraySize": {"tf": 1}, "sqlglot.expressions.ArraySort": {"tf": 1}, "sqlglot.expressions.ArraySum": {"tf": 1}, "sqlglot.expressions.Case": {"tf": 1}, "sqlglot.expressions.Cast": {"tf": 1}, "sqlglot.expressions.Ceil": {"tf": 1}, "sqlglot.expressions.Coalesce": {"tf": 1}, "sqlglot.expressions.Concat": {"tf": 1}, "sqlglot.expressions.CurrentDate": {"tf": 1}, "sqlglot.expressions.CurrentDatetime": {"tf": 1}, "sqlglot.expressions.CurrentTime": {"tf": 1}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1}, 
"sqlglot.expressions.DateAdd": {"tf": 1}, "sqlglot.expressions.DateSub": {"tf": 1}, "sqlglot.expressions.DateDiff": {"tf": 1}, "sqlglot.expressions.DateTrunc": {"tf": 1}, "sqlglot.expressions.DatetimeAdd": {"tf": 1}, "sqlglot.expressions.DatetimeSub": {"tf": 1}, "sqlglot.expressions.DatetimeDiff": {"tf": 1}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1}, "sqlglot.expressions.DayOfWeek": {"tf": 1}, "sqlglot.expressions.DayOfMonth": {"tf": 1}, "sqlglot.expressions.DayOfYear": {"tf": 1}, "sqlglot.expressions.WeekOfYear": {"tf": 1}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1}, "sqlglot.expressions.Extract": {"tf": 1}, "sqlglot.expressions.TimestampAdd": {"tf": 1}, "sqlglot.expressions.TimestampSub": {"tf": 1}, "sqlglot.expressions.TimestampDiff": {"tf": 1}, "sqlglot.expressions.TimestampTrunc": {"tf": 1}, "sqlglot.expressions.TimeAdd": {"tf": 1}, "sqlglot.expressions.TimeSub": {"tf": 1}, "sqlglot.expressions.TimeDiff": {"tf": 1}, "sqlglot.expressions.TimeTrunc": {"tf": 1}, "sqlglot.expressions.DateFromParts": {"tf": 1}, "sqlglot.expressions.DateStrToDate": {"tf": 1}, "sqlglot.expressions.DateToDateStr": {"tf": 1}, "sqlglot.expressions.DateToDi": {"tf": 1}, "sqlglot.expressions.Day": {"tf": 1}, "sqlglot.expressions.Decode": {"tf": 1}, "sqlglot.expressions.DiToDate": {"tf": 1}, "sqlglot.expressions.Encode": {"tf": 1}, "sqlglot.expressions.Exp": {"tf": 1}, "sqlglot.expressions.Explode": {"tf": 1}, "sqlglot.expressions.Floor": {"tf": 1}, "sqlglot.expressions.Greatest": {"tf": 1}, "sqlglot.expressions.GroupConcat": {"tf": 1}, "sqlglot.expressions.Hex": {"tf": 1}, "sqlglot.expressions.If": {"tf": 1}, "sqlglot.expressions.IfNull": {"tf": 1}, "sqlglot.expressions.Initcap": {"tf": 1}, "sqlglot.expressions.JSONExtract": {"tf": 1}, "sqlglot.expressions.Least": {"tf": 1}, "sqlglot.expressions.Length": {"tf": 1}, "sqlglot.expressions.Levenshtein": {"tf": 1}, "sqlglot.expressions.Ln": {"tf": 1}, "sqlglot.expressions.Log": {"tf": 1}, "sqlglot.expressions.Log2": {"tf": 1}, 
"sqlglot.expressions.Log10": {"tf": 1}, "sqlglot.expressions.Lower": {"tf": 1}, "sqlglot.expressions.Map": {"tf": 1}, "sqlglot.expressions.VarMap": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.expressions.Month": {"tf": 1}, "sqlglot.expressions.Nvl2": {"tf": 1}, "sqlglot.expressions.Posexplode": {"tf": 1}, "sqlglot.expressions.Pow": {"tf": 1}, "sqlglot.expressions.ReadCSV": {"tf": 1}, "sqlglot.expressions.Reduce": {"tf": 1}, "sqlglot.expressions.RegexpLike": {"tf": 1}, "sqlglot.expressions.RegexpILike": {"tf": 1}, "sqlglot.expressions.RegexpSplit": {"tf": 1}, "sqlglot.expressions.Repeat": {"tf": 1}, "sqlglot.expressions.Round": {"tf": 1}, "sqlglot.expressions.RowNumber": {"tf": 1}, "sqlglot.expressions.SafeDivide": {"tf": 1}, "sqlglot.expressions.SortArray": {"tf": 1}, "sqlglot.expressions.Split": {"tf": 1}, "sqlglot.expressions.Substring": {"tf": 1}, "sqlglot.expressions.StrPosition": {"tf": 1}, "sqlglot.expressions.StrToDate": {"tf": 1}, "sqlglot.expressions.StrToTime": {"tf": 1}, "sqlglot.expressions.StrToUnix": {"tf": 1}, "sqlglot.expressions.NumberToStr": {"tf": 1}, "sqlglot.expressions.Struct": {"tf": 1}, "sqlglot.expressions.StructExtract": {"tf": 1}, "sqlglot.expressions.Sqrt": {"tf": 1}, "sqlglot.expressions.TimeToStr": {"tf": 1}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1}, "sqlglot.expressions.TimeToUnix": {"tf": 1}, "sqlglot.expressions.TimeStrToDate": {"tf": 1}, "sqlglot.expressions.TimeStrToTime": {"tf": 1}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1}, "sqlglot.expressions.Trim": {"tf": 1}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1}, "sqlglot.expressions.Unhex": {"tf": 1}, "sqlglot.expressions.UnixToStr": {"tf": 1}, "sqlglot.expressions.UnixToTime": {"tf": 1}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1}, "sqlglot.expressions.Upper": {"tf": 1}, "sqlglot.expressions.Week": {"tf": 1}, 
"sqlglot.expressions.Year": {"tf": 1}, "sqlglot.expressions.When": {"tf": 1}}, "df": 116}}}}, "j": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.JSONExtractScalar": {"tf": 1}, "sqlglot.expressions.JSONBExtract": {"tf": 1}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1}}, "df": 3}}}}}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.ApproxQuantile": {"tf": 1}}, "df": 1}}}}}}}}}}, "doc": {"root": {"0": {"0": {"0": {"9": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"4": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"2": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "5": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"0": {"5": {"4": {"5": {"5": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "3": {"0": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"9": {"docs": {"sqlglot": {"tf": 1}}, 
"df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}}}}}, "2": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "3": {"2": {"8": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"1": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"4": {"1": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"5": {"8": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"6": {"7": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "5": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 6.48074069840786}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 38, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "1": {"0": {"0": {"0": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "2": 
{"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "b": {"1": {"4": {"2": {"0": {"0": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"0": {"4": {"0": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}}}}}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}}, "df": 4, "^": {"1": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "1": {"4": {"5": {"docs": {}, "df": 0, "/": {"2": {"6": {"4": {"2": {"9": {"3": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "2": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "3": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "6": {"3": {"2": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "4": {"1": {"3": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"1": {"docs": 
{"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": {"1": {"8": {"0": {"8": {"8": {"0": {"2": {"8": {"2": {"9": {"5": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "9": {"8": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "9": {"6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 6}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 4}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 
1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 43, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}}, "2": {"0": {"0": {"7": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"1": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "4": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "5": {"1": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "5": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "6": 
{"4": {"2": {"9": {"8": {"2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 2}, "9": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, 
"sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 43}, "3": {"1": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "2": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "4": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}, "7": {"7": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "9": {"docs": {"sqlglot": {"tf": 6.855654600401044}, "sqlglot.dataframe": {"tf": 7.0710678118654755}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, 
"sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 4}, "sqlglot.expressions.subquery": {"tf": 2.8284271247461903}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.values": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 3.7416573867739413}, "sqlglot.trie.in_trie": {"tf": 2.8284271247461903}}, "df": 82}, "docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 48}, "4": {"0": {"0": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "3": {"4": {"3": {"4": {"1": {"6": {"6": {"2": {"4": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "7": {"9": {"8": {"3": {"1": {"3": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "4": {"6": {"2": {"4": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": 
{"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "5": {"2": {"9": {"6": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "6": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "8": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "9": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.python.Python.Generator": 
{"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 26}, "5": {"0": {"4": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "3": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 7}, "6": {"0": {"6": {"2": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "1": {"4": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}}, "df": 2, "m": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}, "7": {"0": {"4": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "2": {"5": {"docs": {}, "df": 0, "\u2013": {"7": {"4": {"3": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "6": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "9": {"1": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "2": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "8": {"0": {"5": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}, "9": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 
1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "9": {"0": {"6": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "7": {"0": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "8": {"7": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {"sqlglot": {"tf": 61.318838867023565}, "sqlglot.pretty": {"tf": 1.7320508075688772}, "sqlglot.schema": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 5.916079783099616}, "sqlglot.parse_one": {"tf": 6.324555320336759}, "sqlglot.transpile": {"tf": 7.211102550927978}, "sqlglot.dataframe": {"tf": 48.86716689148246}, "sqlglot.dataframe.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.createDataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.SparkSession.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.select": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.where": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.filter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.groupBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.join": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 2.23606797749979}, 
"sqlglot.dataframe.sql.DataFrame.union": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.unionByName": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersect": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.intersectAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.exceptAll": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.distinct": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropDuplicates": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.dropna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumn": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.withColumnRenamed": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.limit": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.hint": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.repartition": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.coalesce": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.cache": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 2}, "sqlglot.dataframe.sql.GroupedData": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.agg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.count": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.mean": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.avg": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.max": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.min": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.GroupedData.sum": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.GroupedData.pivot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_col": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_cols": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_anonymous_function": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.invoke_expression_over_column": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.inverse_binary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.unary_op": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ensure_literal": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.set_table_name": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.alias": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.asc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_first": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.desc_nulls_last": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.when": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.otherwise": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isNotNull": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.startswith": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.Column.endswith": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.rlike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.like": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.ilike": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.substr": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.isin": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.between": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.over": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.drop": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.fill": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameNaFunctions.replace": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Window.rangeBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.partitionBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.orderBy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rowsBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.WindowSpec.rangeBetween": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader": {"tf": 1.7320508075688772}, 
"sqlglot.dataframe.sql.DataFrameReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameReader.table": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.__init__": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.copy": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.sql": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.mode": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.insertInto": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrameWriter.saveAsTable": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 21.817424229271428}, "sqlglot.dialects.bigquery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.bigquery.BigQuery.Generator.array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.__init__": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.clickhouse.ClickHouse.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.clickhouse.ClickHouse.Generator.cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DIALECT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.BIGQUERY": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.CLICKHOUSE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DUCKDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.HIVE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.MYSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.ORACLE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.POSTGRES": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.PRESTO": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.REDSHIFT": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SNOWFLAKE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SPARK": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.SQLITE": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.STARROCKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TABLEAU": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TRINO": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.dialect.Dialects.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DATABRICKS": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.DRILL": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialects.TERADATA": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.get_or_raise": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.format_time": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parse_into": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generate": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.Dialect.generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.rename_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.approx_count_distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.arrow_json_extract_scalar_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.inline_array_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_paren_current_date_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_recursive_cte_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_safe_divide_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.no_trycast_sql": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.dialect.no_properties_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.str_position_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.struct_extract_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.var_map_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 5.744562646538029}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.parse_date_delta": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.locate_to_strposition": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.strposition_to_locate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.timestrtotime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.datestrtodate_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.if_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.drill.Drill": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.drill.Drill.Generator.normalize_func": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.__init__": {"tf": 
1.7320508075688772}, "sqlglot.dialects.hive.Hive.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.hive.Hive.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.mysql.MySQL.Generator.show_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator.setitem_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator.set_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.oracle.Oracle.Generator.query_modifiers": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator.table_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 
6.082762530298219}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.presto.Presto.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.redshift.Redshift.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 3.3166247903554}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 4.47213595499958}, "sqlglot.dialects.snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.snowflake.Snowflake.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.snowflake.Snowflake.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.intersect_op": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.spark.Spark.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.sqlite.SQLite.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.teradata": {"tf": 
1.7320508075688772}, "sqlglot.dialects.teradata.Teradata": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.teradata.Teradata.Generator.partitionedbyproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.trino.Trino.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.generate_date_delta_with_unit_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.__init__": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 6.082762530298219}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 11.40175425099138}, "sqlglot.dialects.tsql.TSQL.Generator.systemtime_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator.returnsproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 50.556898639058154}, "sqlglot.diff.Insert": {"tf": 1.4142135623730951}, "sqlglot.diff.Insert.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Remove": {"tf": 1.4142135623730951}, "sqlglot.diff.Remove.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1.4142135623730951}, "sqlglot.diff.Move.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Update": {"tf": 1.4142135623730951}, "sqlglot.diff.Update.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.Keep": {"tf": 1.4142135623730951}, 
"sqlglot.diff.Keep.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 11.045361017187261}, "sqlglot.diff.ChangeDistiller": {"tf": 2.6457513110645907}, "sqlglot.diff.ChangeDistiller.__init__": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller.diff": {"tf": 1.7320508075688772}, "sqlglot.errors": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1.7320508075688772}, "sqlglot.errors.SqlglotError": {"tf": 1.7320508075688772}, "sqlglot.errors.UnsupportedError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.__init__": {"tf": 1.7320508075688772}, "sqlglot.errors.ParseError.new": {"tf": 1.7320508075688772}, "sqlglot.errors.TokenError": {"tf": 1.7320508075688772}, "sqlglot.errors.OptimizeError": {"tf": 1.7320508075688772}, "sqlglot.errors.SchemaError": {"tf": 1.7320508075688772}, "sqlglot.errors.ExecuteError": {"tf": 1.7320508075688772}, "sqlglot.errors.concat_messages": {"tf": 1.7320508075688772}, "sqlglot.errors.merge_errors": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 35.11409973215888}, "sqlglot.executor.execute": {"tf": 7}, "sqlglot.executor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 3}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.eval_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.table_iter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.filter": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.sort": {"tf": 
1.7320508075688772}, "sqlglot.executor.context.Context.set_row": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_index": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context.set_range": {"tf": 1.7320508075688772}, "sqlglot.executor.env": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key": {"tf": 1.7320508075688772}, "sqlglot.executor.env.reverse_key.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.filter_nulls": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 4.58257569495584}, "sqlglot.executor.env.str_position": {"tf": 1.7320508075688772}, "sqlglot.executor.env.substring": {"tf": 1.7320508075688772}, "sqlglot.executor.env.cast": {"tf": 1.7320508075688772}, "sqlglot.executor.env.ordered": {"tf": 1.7320508075688772}, "sqlglot.executor.env.interval": {"tf": 1.7320508075688772}, "sqlglot.executor.python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.context": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.static": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_table": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.scan_csv": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.nested_loop_join": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.hash_join": {"tf": 
1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.aggregate": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.sort": {"tf": 1.7320508075688772}, "sqlglot.executor.python.PythonExecutor.set_operation": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 11.40175425099138}, "sqlglot.executor.table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.add_columns": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.append": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Table.pop": {"tf": 1.7320508075688772}, "sqlglot.executor.table.TableIter": {"tf": 1.7320508075688772}, "sqlglot.executor.table.TableIter.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RangeReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader": {"tf": 1.7320508075688772}, "sqlglot.executor.table.RowReader.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 4}, "sqlglot.executor.table.ensure_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 4}, "sqlglot.expressions.Expression": {"tf": 10.954451150103322}, "sqlglot.expressions.Expression.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.this": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_string": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Expression.is_number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.is_int": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Expression.copy": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.set": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.depth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.find_all": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 4.898979485566356}, "sqlglot.expressions.Expression.parent_select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.walk": {"tf": 5.5677643628300215}, "sqlglot.expressions.Expression.dfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.bfs": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unalias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.flatten": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.sql": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.transform": {"tf": 5.477225575051661}, "sqlglot.expressions.Expression.replace": {"tf": 5.5677643628300215}, "sqlglot.expressions.Expression.pop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 9.643650760992955}, "sqlglot.expressions.Expression.error_messages": {"tf": 4.795831523312719}, "sqlglot.expressions.Expression.dump": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 9.746794344808963}, 
"sqlglot.expressions.Condition.or_": {"tf": 9.746794344808963}, "sqlglot.expressions.Condition.not_": {"tf": 7.874007874011811}, "sqlglot.expressions.Predicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DerivedTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 10.723805294763608}, "sqlglot.expressions.Unionable.intersect": {"tf": 10.677078252031311}, "sqlglot.expressions.Unionable.except_": {"tf": 10.723805294763608}, "sqlglot.expressions.UDTF": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Uncache": {"tf": 1.7320508075688772}, "sqlglot.expressions.Create": {"tf": 1.7320508075688772}, "sqlglot.expressions.Describe": {"tf": 1.7320508075688772}, "sqlglot.expressions.Set": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetItem": {"tf": 1.7320508075688772}, "sqlglot.expressions.Show": {"tf": 1.7320508075688772}, "sqlglot.expressions.UserDefinedFunction": {"tf": 1.7320508075688772}, "sqlglot.expressions.UserDefinedFunctionKwarg": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSet": {"tf": 1.7320508075688772}, "sqlglot.expressions.With": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithinGroup": {"tf": 1.7320508075688772}, "sqlglot.expressions.CTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.TableAlias": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitString": {"tf": 1.7320508075688772}, "sqlglot.expressions.HexString": {"tf": 1.7320508075688772}, "sqlglot.expressions.ByteString": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.ColumnDef": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterColumn": {"tf": 1.7320508075688772}, "sqlglot.expressions.RenameTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.ColumnConstraint": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.ColumnConstraintKind": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CheckColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.CommentColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefaultColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.EncodeColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.GeneratedAsIdentityColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.NotNullColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKeyColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.UniqueColumnConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Constraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.Drop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Filter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Check": {"tf": 1.7320508075688772}, "sqlglot.expressions.Directory": {"tf": 1.7320508075688772}, "sqlglot.expressions.ForeignKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.PrimaryKey": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unique": {"tf": 1.7320508075688772}, "sqlglot.expressions.Into": {"tf": 1.7320508075688772}, "sqlglot.expressions.From": {"tf": 1.7320508075688772}, "sqlglot.expressions.Having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hint": {"tf": 1.7320508075688772}, "sqlglot.expressions.JoinHint": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Index": {"tf": 1.7320508075688772}, "sqlglot.expressions.Insert": {"tf": 1.7320508075688772}, "sqlglot.expressions.Introducer": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.National": {"tf": 1.7320508075688772}, "sqlglot.expressions.LoadData": {"tf": 1.7320508075688772}, "sqlglot.expressions.Partition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Fetch": {"tf": 1.7320508075688772}, "sqlglot.expressions.Group": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lambda": {"tf": 1.7320508075688772}, "sqlglot.expressions.Limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.number": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.string": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 11.40175425099138}, "sqlglot.expressions.Join.using": {"tf": 11.704699910719626}, "sqlglot.expressions.Lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.MatchRecognize": {"tf": 1.7320508075688772}, "sqlglot.expressions.Final": {"tf": 1.7320508075688772}, "sqlglot.expressions.Offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Order": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cluster": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distribute": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sort": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ordered": {"tf": 1.7320508075688772}, "sqlglot.expressions.Property": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlgorithmProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DefinerProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SqlSecurityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.TableFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.PartitionedByProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FileFormatProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DistKeyProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortKeyProperty": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.DistStyleProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LikeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LocationProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.EngineProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.AutoIncrementProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CharacterSetProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.CollateProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SchemaCommentProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReturnsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LanguageProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ExecuteAsProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.VolatilityProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatDelimitedProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowFormatSerdeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.SerdeProperties": {"tf": 1.7320508075688772}, "sqlglot.expressions.FallbackProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.WithJournalTableProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.JournalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.AfterJournalProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.ChecksumProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.FreespaceProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.MergeBlockRatioProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataBlocksizeProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.BlockCompressionProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.IsolatedLoadingProperty": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Properties.Location.POST_CREATE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.PRE_SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_INDEX": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_ROOT": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.POST_SCHEMA_WITH": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.Location.UNSUPPORTED": {"tf": 1.7320508075688772}, "sqlglot.expressions.Properties.from_dict": {"tf": 1.7320508075688772}, "sqlglot.expressions.Qualify": {"tf": 1.7320508075688772}, "sqlglot.expressions.Return": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reference": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tuple": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 11.832159566199232}, "sqlglot.expressions.Subqueryable.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 12.767145334803704}, "sqlglot.expressions.Table": {"tf": 1.7320508075688772}, "sqlglot.expressions.SystemTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 11.090536506409418}, "sqlglot.expressions.Except": {"tf": 1.7320508075688772}, "sqlglot.expressions.Intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Update": {"tf": 1.7320508075688772}, "sqlglot.expressions.Values": {"tf": 1.7320508075688772}, "sqlglot.expressions.Var": {"tf": 1.7320508075688772}, "sqlglot.expressions.Schema": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 11.135528725660043}, "sqlglot.expressions.Select.group_by": {"tf": 11.958260743101398}, 
"sqlglot.expressions.Select.order_by": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.sort_by": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.cluster_by": {"tf": 11.704699910719626}, "sqlglot.expressions.Select.limit": {"tf": 11.224972160321824}, "sqlglot.expressions.Select.offset": {"tf": 11.224972160321824}, "sqlglot.expressions.Select.select": {"tf": 10.535653752852738}, "sqlglot.expressions.Select.lateral": {"tf": 11.445523142259598}, "sqlglot.expressions.Select.join": {"tf": 19.026297590440446}, "sqlglot.expressions.Select.where": {"tf": 11.445523142259598}, "sqlglot.expressions.Select.having": {"tf": 12.288205727444508}, "sqlglot.expressions.Select.window": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 10.04987562112089}, "sqlglot.expressions.Select.ctas": {"tf": 11.357816691600547}, "sqlglot.expressions.Select.lock": {"tf": 14.696938456699069}, "sqlglot.expressions.Subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.unnest": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.TableSample": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tag": {"tf": 2.23606797749979}, "sqlglot.expressions.Pivot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Window": {"tf": 1.7320508075688772}, "sqlglot.expressions.WindowSpec": {"tf": 1.7320508075688772}, "sqlglot.expressions.Where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Parameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.SessionParameter": {"tf": 1.7320508075688772}, "sqlglot.expressions.Placeholder": {"tf": 1.7320508075688772}, "sqlglot.expressions.Null": {"tf": 1.7320508075688772}, "sqlglot.expressions.Boolean": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type": {"tf": 
1.7320508075688772}, "sqlglot.expressions.DataType.Type.CHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.FLOAT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSON": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.JSONB": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.INTERVAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.DATE": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.DataType.Type.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ARRAY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MAP": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UUID": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.STRUCT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SUPER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLSERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.XML": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.MONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.NULL": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.Type.UNKNOWN": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.build": {"tf": 1.7320508075688772}, "sqlglot.expressions.DataType.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.PseudoType": {"tf": 1.7320508075688772}, "sqlglot.expressions.StructKwarg": {"tf": 
1.7320508075688772}, "sqlglot.expressions.SubqueryPredicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.All": {"tf": 1.7320508075688772}, "sqlglot.expressions.Any": {"tf": 1.7320508075688772}, "sqlglot.expressions.Exists": {"tf": 1.7320508075688772}, "sqlglot.expressions.Command": {"tf": 1.7320508075688772}, "sqlglot.expressions.Transaction": {"tf": 1.7320508075688772}, "sqlglot.expressions.Commit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Rollback": {"tf": 1.7320508075688772}, "sqlglot.expressions.AlterTable": {"tf": 1.7320508075688772}, "sqlglot.expressions.AddConstraint": {"tf": 1.7320508075688772}, "sqlglot.expressions.DropPartition": {"tf": 1.7320508075688772}, "sqlglot.expressions.Binary": {"tf": 1.7320508075688772}, "sqlglot.expressions.Add": {"tf": 1.7320508075688772}, "sqlglot.expressions.Connector": {"tf": 1.7320508075688772}, "sqlglot.expressions.And": {"tf": 1.7320508075688772}, "sqlglot.expressions.Or": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseAnd": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseLeftShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseRightShift": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseXor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Div": {"tf": 1.7320508075688772}, "sqlglot.expressions.Dot": {"tf": 1.7320508075688772}, "sqlglot.expressions.DPipe": {"tf": 1.7320508075688772}, "sqlglot.expressions.EQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.NullSafeNEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distance": {"tf": 1.7320508075688772}, "sqlglot.expressions.Escape": {"tf": 1.7320508075688772}, "sqlglot.expressions.Glob": {"tf": 1.7320508075688772}, "sqlglot.expressions.GT": {"tf": 1.7320508075688772}, "sqlglot.expressions.GTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.ILike": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.IntDiv": {"tf": 1.7320508075688772}, "sqlglot.expressions.Is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Kwarg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Like": {"tf": 1.7320508075688772}, "sqlglot.expressions.LT": {"tf": 1.7320508075688772}, "sqlglot.expressions.LTE": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mod": {"tf": 1.7320508075688772}, "sqlglot.expressions.Mul": {"tf": 1.7320508075688772}, "sqlglot.expressions.NEQ": {"tf": 1.7320508075688772}, "sqlglot.expressions.SimilarTo": {"tf": 1.7320508075688772}, "sqlglot.expressions.Slice": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sub": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unary": {"tf": 1.7320508075688772}, "sqlglot.expressions.BitwiseNot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Not": {"tf": 1.7320508075688772}, "sqlglot.expressions.Paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.Neg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Aliases": {"tf": 1.7320508075688772}, "sqlglot.expressions.AtTimeZone": {"tf": 1.7320508075688772}, "sqlglot.expressions.Between": {"tf": 1.7320508075688772}, "sqlglot.expressions.Bracket": {"tf": 1.7320508075688772}, "sqlglot.expressions.Distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.In": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit.__init__": {"tf": 1.7320508075688772}, "sqlglot.expressions.Interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.IgnoreNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.RespectNulls": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 4.69041575982343}, "sqlglot.expressions.Func.from_arg_list": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func.sql_name": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Func.default_parser_mappings": {"tf": 1.7320508075688772}, "sqlglot.expressions.AggFunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.Abs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Anonymous": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxDistinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Array": {"tf": 1.7320508075688772}, "sqlglot.expressions.GenerateSeries": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAll": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayAny": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayFilter": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySize": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySort": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArraySum": {"tf": 1.7320508075688772}, "sqlglot.expressions.ArrayUnionAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.Avg": {"tf": 1.7320508075688772}, "sqlglot.expressions.AnyValue": {"tf": 1.7320508075688772}, "sqlglot.expressions.Case": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 11.313708498984761}, "sqlglot.expressions.Cast.is_type": {"tf": 1.7320508075688772}, "sqlglot.expressions.Collate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TryCast": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ceil": {"tf": 1.7320508075688772}, "sqlglot.expressions.Coalesce": {"tf": 1.7320508075688772}, "sqlglot.expressions.Concat": {"tf": 1.7320508075688772}, "sqlglot.expressions.ConcatWs": {"tf": 1.7320508075688772}, "sqlglot.expressions.Count": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentDatetime": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.CurrentTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.CurrentTimestamp": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.DatetimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfWeek": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.DayOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.WeekOfYear": {"tf": 1.7320508075688772}, "sqlglot.expressions.LastDateOfMonth": {"tf": 1.7320508075688772}, "sqlglot.expressions.Extract": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimestampTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeSub": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeDiff": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeTrunc": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateFromParts": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateStrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.DateToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Day": {"tf": 1.7320508075688772}, "sqlglot.expressions.Decode": {"tf": 1.7320508075688772}, "sqlglot.expressions.DiToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.Encode": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Exp": {"tf": 1.7320508075688772}, "sqlglot.expressions.Explode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Floor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Greatest": {"tf": 1.7320508075688772}, "sqlglot.expressions.GroupConcat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Hex": {"tf": 1.7320508075688772}, "sqlglot.expressions.If": {"tf": 1.7320508075688772}, "sqlglot.expressions.IfNull": {"tf": 1.7320508075688772}, "sqlglot.expressions.Initcap": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBContains": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONExtractScalar": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.JSONBExtractScalar": {"tf": 1.7320508075688772}, "sqlglot.expressions.Least": {"tf": 1.7320508075688772}, "sqlglot.expressions.Length": {"tf": 1.7320508075688772}, "sqlglot.expressions.Levenshtein": {"tf": 1.7320508075688772}, "sqlglot.expressions.Ln": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Log10": {"tf": 1.7320508075688772}, "sqlglot.expressions.LogicalOr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Lower": {"tf": 1.7320508075688772}, "sqlglot.expressions.Map": {"tf": 1.7320508075688772}, "sqlglot.expressions.VarMap": {"tf": 1.7320508075688772}, "sqlglot.expressions.Matches": {"tf": 2.23606797749979}, "sqlglot.expressions.Max": {"tf": 1.7320508075688772}, "sqlglot.expressions.Min": {"tf": 1.7320508075688772}, "sqlglot.expressions.Month": {"tf": 1.7320508075688772}, "sqlglot.expressions.Nvl2": {"tf": 1.7320508075688772}, "sqlglot.expressions.Posexplode": {"tf": 1.7320508075688772}, "sqlglot.expressions.Pow": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileCont": {"tf": 1.7320508075688772}, "sqlglot.expressions.PercentileDisc": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Quantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.Quantiles": {"tf": 1.7320508075688772}, "sqlglot.expressions.QuantileIf": {"tf": 1.7320508075688772}, "sqlglot.expressions.ApproxQuantile": {"tf": 1.7320508075688772}, "sqlglot.expressions.ReadCSV": {"tf": 1.7320508075688772}, "sqlglot.expressions.Reduce": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpLike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpILike": {"tf": 1.7320508075688772}, "sqlglot.expressions.RegexpSplit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Repeat": {"tf": 1.7320508075688772}, "sqlglot.expressions.Round": {"tf": 1.7320508075688772}, "sqlglot.expressions.RowNumber": {"tf": 1.7320508075688772}, "sqlglot.expressions.SafeDivide": {"tf": 1.7320508075688772}, "sqlglot.expressions.SetAgg": {"tf": 1.7320508075688772}, "sqlglot.expressions.SortArray": {"tf": 1.7320508075688772}, "sqlglot.expressions.Split": {"tf": 1.7320508075688772}, "sqlglot.expressions.Substring": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrPosition": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.StrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.NumberToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Struct": {"tf": 1.7320508075688772}, "sqlglot.expressions.StructExtract": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sum": {"tf": 1.7320508075688772}, "sqlglot.expressions.Sqrt": {"tf": 1.7320508075688772}, "sqlglot.expressions.Stddev": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevPop": {"tf": 1.7320508075688772}, "sqlglot.expressions.StddevSamp": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToDate": 
{"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeStrToUnix": {"tf": 1.7320508075688772}, "sqlglot.expressions.Trim": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsAdd": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDateStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDsToDate": {"tf": 1.7320508075688772}, "sqlglot.expressions.TsOrDiToDi": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unhex": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTime": {"tf": 1.7320508075688772}, "sqlglot.expressions.UnixToTimeStr": {"tf": 1.7320508075688772}, "sqlglot.expressions.Upper": {"tf": 1.7320508075688772}, "sqlglot.expressions.Variance": {"tf": 1.7320508075688772}, "sqlglot.expressions.VariancePop": {"tf": 1.7320508075688772}, "sqlglot.expressions.Week": {"tf": 1.7320508075688772}, "sqlglot.expressions.Year": {"tf": 1.7320508075688772}, "sqlglot.expressions.Use": {"tf": 1.7320508075688772}, "sqlglot.expressions.Merge": {"tf": 1.7320508075688772}, "sqlglot.expressions.When": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 9.899494936611665}, "sqlglot.expressions.union": {"tf": 10.344080432788601}, "sqlglot.expressions.intersect": {"tf": 10.344080432788601}, "sqlglot.expressions.except_": {"tf": 10.392304845413264}, "sqlglot.expressions.select": {"tf": 9.9498743710662}, "sqlglot.expressions.from_": {"tf": 9.9498743710662}, "sqlglot.expressions.update": {"tf": 12.12435565298214}, "sqlglot.expressions.delete": {"tf": 9.327379053088816}, "sqlglot.expressions.condition": {"tf": 13.92838827718412}, "sqlglot.expressions.and_": {"tf": 9.848857801796104}, "sqlglot.expressions.or_": {"tf": 9.848857801796104}, "sqlglot.expressions.not_": {"tf": 8.831760866327848}, "sqlglot.expressions.paren": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_identifier": {"tf": 5.291502622129181}, 
"sqlglot.expressions.to_interval": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 5.196152422706632}, "sqlglot.expressions.to_column": {"tf": 5.291502622129181}, "sqlglot.expressions.alias_": {"tf": 12.649110640673518}, "sqlglot.expressions.subquery": {"tf": 10.198039027185569}, "sqlglot.expressions.column": {"tf": 5.385164807134504}, "sqlglot.expressions.cast": {"tf": 8.888194417315589}, "sqlglot.expressions.table_": {"tf": 5.916079783099616}, "sqlglot.expressions.values": {"tf": 8.888194417315589}, "sqlglot.expressions.rename_table": {"tf": 4.898979485566356}, "sqlglot.expressions.convert": {"tf": 5}, "sqlglot.expressions.replace_children": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 8.48528137423857}, "sqlglot.expressions.table_name": {"tf": 9.797958971132712}, "sqlglot.expressions.replace_tables": {"tf": 10.44030650891055}, "sqlglot.expressions.replace_placeholders": {"tf": 11.575836902790225}, "sqlglot.expressions.expand": {"tf": 10.770329614269007}, "sqlglot.expressions.func": {"tf": 12.884098726725126}, "sqlglot.expressions.true": {"tf": 1.7320508075688772}, "sqlglot.expressions.false": {"tf": 1.7320508075688772}, "sqlglot.expressions.null": {"tf": 1.7320508075688772}, "sqlglot.generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 11.40175425099138}, "sqlglot.generator.Generator.__init__": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 3}, "sqlglot.generator.Generator.unsupported": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sep": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.seg": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pad_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.maybe_comment": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.wrap": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.no_identify": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.normalize_func": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.indent": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uncache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cache_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.characterset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.column_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columndef_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.columnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.autoincrementcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.checkcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.commentcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.collatecolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.encodecolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.defaultcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generatedasidentitycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.notnullcolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykeycolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.uniquecolumnconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.create_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.describe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.prepend_ctes": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tablealias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitstring_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.hexstring_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.directory_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.delete_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.drop_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.except_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fetch_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.filter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.hint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.index_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.identifier_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.national_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.root_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.with_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.locate_properties": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.property_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.likeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.fallbackproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.journalproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.freespaceproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.afterjournalproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.checksumproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mergeblockratioproperty_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.datablocksizeproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.blockcompressionproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.isolatedloadingproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.insert_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intersect_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.introducer_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pseudotype_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rowformatdelimitedproperty_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.table_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tablesample_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.pivot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tuple_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.update_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.var_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.into_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.from_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.group_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.having_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.join_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lambda_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lateral_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.limit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.offset_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lock_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.literal_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.loaddata_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.null_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.boolean_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.order_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cluster_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distribute_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sort_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ordered_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.matchrecognize_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.query_modifiers": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.schema_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.star_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.structkwarg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.parameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sessionparameter_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.placeholder_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.subquery_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.qualify_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.union_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.unnest_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.where_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.window_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.partition_by_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.window_spec_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.withingroup_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.between_sql": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.bracket_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.all_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.any_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.exists_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.case_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.constraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.extract_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trim_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.concat_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.check_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.foreignkey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.primarykey_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.unique_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.if_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.in_unnest_op": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.interval_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.return_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.reference_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.anonymous_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.paren_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.not_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.alias_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.aliases_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.attimezone_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.add_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.and_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.connector_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseand_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseleftshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisenot_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiseor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwiserightshift_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.bitwisexor_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.cast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.currentdate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.collate_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.command_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.transaction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.commit_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.rollback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altercolumn_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.renametable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.altertable_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.droppartition_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.addconstraint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distinct_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ignorenulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.respectnulls_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.intdiv_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dpipe_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.div_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.distance_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.dot_sql": {"tf": 1.7320508075688772}, 
"sqlglot.generator.Generator.eq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.escape_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.glob_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.gte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.ilike_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.is_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.like_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.similarto_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lt_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.lte_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mod_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.mul_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.neq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeeq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.nullsafeneq_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.or_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.slice_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.sub_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.trycast_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.use_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.binary": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.function_fallback_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_args": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.text_width": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.format_time": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.op_expressions": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.naked_property": {"tf": 
1.7320508075688772}, "sqlglot.generator.Generator.set_operation": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.tag_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.token_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.userdefinedfunction_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.userdefinedfunctionkwarg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.joinhint_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.kwarg_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.when_sql": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.merge_sql": {"tf": 1.7320508075688772}, "sqlglot.helper": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 2.449489742783178}, "sqlglot.helper.seq_get": {"tf": 3.3166247903554}, "sqlglot.helper.ensure_list": {"tf": 4.795831523312719}, "sqlglot.helper.ensure_collection": {"tf": 5.196152422706632}, "sqlglot.helper.csv": {"tf": 5.291502622129181}, "sqlglot.helper.subclasses": {"tf": 5.744562646538029}, "sqlglot.helper.apply_index_offset": {"tf": 5.477225575051661}, "sqlglot.helper.camel_to_snake_case": {"tf": 2.23606797749979}, "sqlglot.helper.while_changing": {"tf": 5.291502622129181}, "sqlglot.helper.tsort": {"tf": 4.795831523312719}, "sqlglot.helper.open_file": {"tf": 1.7320508075688772}, "sqlglot.helper.csv_reader": {"tf": 5.477225575051661}, "sqlglot.helper.find_new_name": {"tf": 5.291502622129181}, "sqlglot.helper.object_to_dict": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 12.649110640673518}, "sqlglot.helper.is_iterable": {"tf": 8.94427190999916}, "sqlglot.helper.flatten": {"tf": 11.224972160321824}, "sqlglot.helper.count_params": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 11.489125293076057}, "sqlglot.helper.first": {"tf": 2.449489742783178}, "sqlglot.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.__init__": 
{"tf": 1.7320508075688772}, "sqlglot.lineage.Node.walk": {"tf": 1.7320508075688772}, "sqlglot.lineage.Node.to_html": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 6.928203230275509}, "sqlglot.lineage.LineageHTML": {"tf": 2.6457513110645907}, "sqlglot.lineage.LineageHTML.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 13.2664991614216}, "sqlglot.optimizer.annotate_types.TypeAnnotator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.TypeAnnotator.annotate": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 4.123105625617661}, "sqlglot.optimizer.canonicalize.add_text_to_concat": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.coerce_type": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.remove_redundant_casts": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 10.198039027185569}, "sqlglot.optimizer.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 10.344080432788601}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 4.58257569495584}, "sqlglot.optimizer.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 12.727922061357855}, "sqlglot.optimizer.expand_laterals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 10.63014581273465}, "sqlglot.optimizer.expand_multi_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 7.874007874011811}, 
"sqlglot.optimizer.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.isolate_table_selects.isolate_table_selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.lower_identities": {"tf": 1.7320508075688772}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 9.797958971132712}, "sqlglot.optimizer.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 11.180339887498949}, "sqlglot.optimizer.merge_subqueries.merge_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_derived_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 10.14889156509222}, "sqlglot.optimizer.normalize.normalized": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 9.591663046625438}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 7.874007874011811}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.other_table_names": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 7.54983443527075}, "sqlglot.optimizer.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 10.344080432788601}, "sqlglot.optimizer.pushdown_predicates.pushdown": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.nodes_for_predicate": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.pushdown_predicates.replace_aliases": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.DEFAULT_SELECTION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 10.198039027185569}, "sqlglot.optimizer.qualify_columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 11.575836902790225}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 10.862780491200215}, "sqlglot.optimizer.scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.ROOT": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.SUBQUERY": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.DERIVED_TABLE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.CTE": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UNION": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.ScopeType.UDTF": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 8.246211251235321}, "sqlglot.optimizer.scope.Scope.__init__": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.clear_cache": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.walk": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 5.744562646538029}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 5.656854249492381}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 5.477225575051661}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 4.47213595499958}, 
"sqlglot.optimizer.scope.Scope.subqueries": {"tf": 4.47213595499958}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 3.872983346207417}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 4}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 4.898979485566356}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 13.601470508735444}, "sqlglot.optimizer.scope.build_scope": {"tf": 4.69041575982343}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 5.385164807134504}, "sqlglot.optimizer.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 9.643650760992955}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, 
"sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.simplify_connectors": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2.6457513110645907}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.simplify.simplify_literals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_parens": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_where_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.always_true": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.is_complement": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.eval_boolean": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_date": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.extract_interval": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.date_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.boolean_literal": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 9.899494936611665}, "sqlglot.optimizer.unnest_subqueries.unnest": {"tf": 1.7320508075688772}, "sqlglot.optimizer.unnest_subqueries.decorrelate": {"tf": 1.7320508075688772}, "sqlglot.parser": {"tf": 1.7320508075688772}, "sqlglot.parser.parse_var_map": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 6.082762530298219}, "sqlglot.parser.Parser.__init__": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.reset": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 5.291502622129181}, "sqlglot.parser.Parser.parse_into": {"tf": 5.744562646538029}, "sqlglot.parser.Parser.check_errors": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.raise_error": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 5.744562646538029}, 
"sqlglot.parser.Parser.validate_expression": {"tf": 4.358898943540674}, "sqlglot.planner": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan": {"tf": 1.7320508075688772}, "sqlglot.planner.Plan.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Step.add_dependency": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.to_s": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 7.681145747868608}, "sqlglot.planner.Join": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Join.from_joins": {"tf": 1.7320508075688772}, "sqlglot.planner.Aggregate": {"tf": 1.7320508075688772}, "sqlglot.planner.Aggregate.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.Sort": {"tf": 1.7320508075688772}, "sqlglot.planner.Sort.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.__init__": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 7.681145747868608}, "sqlglot.schema.Schema": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 4.358898943540674}, "sqlglot.schema.Schema.column_names": {"tf": 5.477225575051661}, "sqlglot.schema.Schema.get_column_type": {"tf": 5.477225575051661}, "sqlglot.schema.Schema.supported_table_args": {"tf": 2.449489742783178}, "sqlglot.schema.AbstractMappingSchema": {"tf": 4}, "sqlglot.schema.AbstractMappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.table_parts": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema.find": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 7.211102550927978}, 
"sqlglot.schema.MappingSchema.__init__": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.from_mapping_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.copy": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 4.58257569495584}, "sqlglot.schema.MappingSchema.column_names": {"tf": 5.477225575051661}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 5.477225575051661}, "sqlglot.schema.ensure_schema": {"tf": 1.7320508075688772}, "sqlglot.schema.ensure_column_mapping": {"tf": 1.7320508075688772}, "sqlglot.schema.flatten_schema": {"tf": 1.7320508075688772}, "sqlglot.serde": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1.7320508075688772}, "sqlglot.serde.load": {"tf": 2.23606797749979}, "sqlglot.time": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 7.681145747868608}, "sqlglot.tokens": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_PAREN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.L_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.R_BRACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLUS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DCOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMICOLON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BACKSLASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SLASH": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.LT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLSAFE_EQ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DPIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CARET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TILDA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HASH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DHASH_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LR_ARROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOLLAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SESSION_PARAMETER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NATIONAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_START": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BLOCK_END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SPACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BREAK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NUMBER": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.IDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLUMN_DEF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SCHEMA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIT_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HEX_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BYTE_STRING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BOOLEAN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TINYINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FLOAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DOUBLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DECIMAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NVARCHAR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGTEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MEDIUMBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LONGBLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BINARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARBINARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JSONB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIME": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TIMESTAMPLTZ": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UUID": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOGRAPHY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLABLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GEOMETRY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HLLSKETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HSTORE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SUPER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BIGSERIAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.XML": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUEIDENTIFIER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SMALLMONEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROWVERSION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IMAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VARIANT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OBJECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALIAS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALWAYS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ALL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANTI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ANY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.APPLY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ARRAY": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.ASC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ASOF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AT_TIME_ZONE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.AUTO_INCREMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BEGIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BETWEEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BOTH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BUCKET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.BY_DEFAULT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CACHE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CASCADE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CASE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHARACTER_SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CHECK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CLUSTER_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COLLATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMAND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.COMPOUND": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CONSTRAINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CREATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CROSS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CUBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_DATETIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_ROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.CURRENT_TIMESTAMP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DEFAULT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DELETE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.DESC": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DESCRIBE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DISTINCT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DISTINCT_FROM": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DISTRIBUTE_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DIV": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.DROP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ELSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ENCODE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.END": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ESCAPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXCEPT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXECUTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.EXISTS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FALSE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FETCH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FILTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FINAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FIRST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOLLOWING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FOREIGN_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FORMAT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FROM": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.FUNCTION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GENERATED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOB": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GLOBAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUP_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.GROUPING_SETS": {"tf": 1.7320508075688772}, 
"sqlglot.tokens.TokenType.HAVING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.HINT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IDENTITY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IGNORE_NULLS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ILIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INDEX": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INNER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INSERT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERSECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTERVAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.INTRODUCER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IRLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.IS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ISNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.JOIN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LANGUAGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LATERAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LAZY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LEADING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LEFT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LIMIT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOAD_DATA": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.LOCAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MAP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MATCH_RECOGNIZE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MATERIALIZED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MERGE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.MOD": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.NATURAL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NEXT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NO_ACTION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NOTNULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULL": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLS_FIRST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.NULLS_LAST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OFFSET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ON": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ONLY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OPTIONS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDER_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDERED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ORDINALITY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OUTER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OUT_OF": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.OVERWRITE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PARTITION_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PERCENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PLACEHOLDER": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRECEDING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PRIMARY_KEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROCEDURE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.PSEUDO_TYPE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUALIFY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.QUOTE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RANGE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.RECURSIVE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REPLACE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RESPECT_NULLS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.REFERENCES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RIGHT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.RLIKE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLBACK": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROLLUP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.ROWS": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SCHEMA_COMMENT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SELECT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEMI": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SEPARATOR": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SERDE_PROPERTIES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SET": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SHOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SIMILAR_TO": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SOME": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SORTKEY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.SORT_BY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.STRUCT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TABLE_SAMPLE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TEMPORARY": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TOP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.THEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TRAILING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.TRUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNBOUNDED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNCACHE": {"tf": 
1.7320508075688772}, "sqlglot.tokens.TokenType.UNION": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNLOGGED": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNNEST": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNPIVOT": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UPDATE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.USING": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VALUES": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VIEW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.VOLATILE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHEN": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WHERE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WINDOW": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITH": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITH_TIME_ZONE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITH_LOCAL_TIME_ZONE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITHIN_GROUP": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.WITHOUT_TIME_ZONE": {"tf": 1.7320508075688772}, "sqlglot.tokens.TokenType.UNIQUE": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.__init__": {"tf": 1.7320508075688772}, "sqlglot.tokens.Token.number": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.string": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.identifier": {"tf": 2.23606797749979}, "sqlglot.tokens.Token.var": {"tf": 2.23606797749979}, "sqlglot.tokens.Tokenizer": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.__init__": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.reset": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 2.23606797749979}, "sqlglot.transforms": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 9.327379053088816}, "sqlglot.transforms.eliminate_distinct_on": 
{"tf": 5.0990195135927845}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 5.477225575051661}, "sqlglot.transforms.delegate": {"tf": 2.6457513110645907}, "sqlglot.trie": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 9.327379053088816}, "sqlglot.trie.in_trie": {"tf": 13.228756555322953}}, "df": 1667, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": 
{"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 54, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 5.5677643628300215}, "sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 2.23606797749979}, "sqlglot.parse_one": {"tf": 2.23606797749979}, "sqlglot.transpile": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 6.48074069840786}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, 
"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 6.324555320336759}, "sqlglot.executor.execute": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, 
"sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2.23606797749979}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 
1.7320508075688772}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}}, "df": 122, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.4031242374328485}, "sqlglot.schema": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 5}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 3}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.449489742783178}, "sqlglot.optimizer.normalize.normalize": {"tf": 2.23606797749979}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.23606797749979}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 74, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "x": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 11, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 3.3166247903554}}, "df": 1}}}}}}, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 5}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}}, "df": 21}}, "c": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 3}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, 
"sqlglot.expressions.alias_": {"tf": 1}}, "df": 3}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 5}}}}}}}, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 2}}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": 
{"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 57}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 4}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 6}}}}}}}, "b": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 3, "c": {"docs": {}, 
"df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 2}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 2}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 11}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 11, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 2.6457513110645907}}, "df": 1}}}, "m": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 5, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 6}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 3}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}}, "df": 6}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 10, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.8284271247461903}, 
"sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 12, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 10, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "w": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.diff": {"tf": 7.937253933193772}, "sqlglot.diff.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 2}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 21, "d": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}}, "df": 7}}}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}, "t": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 25, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, 
"sqlglot.generator.Generator": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 27}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}, "b": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, 
"sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.7320508075688772}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 70, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, 
"sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 3.7416573867739413}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 
2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, 
"sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.string": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 92, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 23}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}}}}, "|": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.delete": {"tf": 1}}, "df": 1}}}}}}}}}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, 
"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 7, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "w": {"docs": 
{"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 6.164414002968976}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.449489742783178}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lateral": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.where": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 
2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.6457513110645907}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, 
"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 97, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 9}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}}, "df": 2}}, "|": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 8}}, "e": 
{"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}}}, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, 
"l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 5.916079783099616}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, 
"sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}}, "df": 80, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, 
"df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 
1.4142135623730951}}, "df": 21}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"1": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 6, "n": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {"sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 4.242640687119285}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.6457513110645907}, 
"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.23606797749979}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 20, "s": {"docs": {"sqlglot.schema.Schema": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.605551275463989}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3.3166247903554}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.build_scope": {"tf": 2}}, "df": 33, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 2.8284271247461903}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 
1.4142135623730951}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "r": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 4}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 41, "i": {"docs": {}, "df": 0, "n": {"docs": {}, 
"df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}}, "df": 3}, "i": {"docs": {}, "df": 
0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 2}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.simplify.simplify": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3}, "y": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 4.242640687119285}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 11}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 4}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 
1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 7}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.replace": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, 
"sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 7.54983443527075}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.58257569495584}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 
2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, 
"sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 143, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": 
{}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 82, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.is_iterable": {"tf": 2.23606797749979}, "sqlglot.helper.flatten": {"tf": 2}, "sqlglot.helper.first": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 12, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "n": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.605551275463989}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 
2.8284271247461903}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, 
"sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 148, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 40, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 41, "s": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}, "n": {"docs": {}, "df": 
0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 52, "s": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 6}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5, "d": {"docs": {"sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": 
{"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 10}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}}, "df": 3, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 3}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 2}}}}}}, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2.23606797749979}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 34, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, 
"df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, 
"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 39}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Unionable.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}}, "df": 2}}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.6457513110645907}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 2}}}}}}}, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 6}}}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 19}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": 
{"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 45}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.is_iterable": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 
1}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}}, "df": 5}}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 6, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.dataframe": {"tf": 4.47213595499958}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, 
"sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, 
"sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 42, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 3}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "m": {"docs": 
{}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 4.69041575982343}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 2}, 
"sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.tokens.Token.identifier": {"tf": 1.4142135623730951}}, "df": 29, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}}, "df": 26}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}, "s": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3}}, "f": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, 
"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.drill.if_sql": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.8284271247461903}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.8284271247461903}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.using": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": 
{"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.8284271247461903}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, 
"sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 135}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "\u2019": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "m": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": 
{"sqlglot": {"tf": 7.615773105863909}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2.449489742783178}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3}, "sqlglot.diff": {"tf": 8.94427190999916}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2.23606797749979}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 8.660254037844387}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 3}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_number": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, 
"sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 2.23606797749979}, "sqlglot.expressions.to_column": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 3}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.449489742783178}, "sqlglot.helper.ensure_collection": 
{"tf": 2.23606797749979}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.7320508075688772}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 2}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 4.242640687119285}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3.4641016151377544}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.Scan.from_expression": {"tf": 3.4641016151377544}, "sqlglot.planner.SetOperation.from_expression": {"tf": 3.4641016151377544}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, 
"sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 2}, "sqlglot.transforms.delegate": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 2.23606797749979}, "sqlglot.trie.in_trie": {"tf": 2.449489742783178}}, "df": 220, "n": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, 
"sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": 
{"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 120, "d": {"docs": {"sqlglot": {"tf": 5.196152422706632}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 7.0710678118654755}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.executor": {"tf": 7.0710678118654755}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 
2.449489742783178}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.count_params": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.6457513110645907}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, 
"sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 115}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 14}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, 
"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 28, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "l": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "s": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 17}}, "l": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, 
"sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 67, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "/": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 
1}}}}}}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 54, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 23}, "d": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "g": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 5}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 
1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 
1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 75, "n": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 1}}, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 2}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 14, "s": {"docs": 
{"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, 
"sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 2}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, 
"sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, 
"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 172}}}}}}, "s": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 11}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "[": {"0": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "1": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, 
"sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "docs": {}, "df": 0}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 4}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 4.47213595499958}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4}, 
"sqlglot.executor": {"tf": 4.242640687119285}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 2.23606797749979}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, 
"sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 137, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": 
{"tf": 1}}, "df": 17, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.replace_tables": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, 
"df": 3}}}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot": {"tf": 2}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 4, 
"i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}}, "df": 4}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}}, "df": 15, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.scope.Scope.external_columns": {"tf": 
1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 3}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}}, "df": 14, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}}}}}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 4}}}, "s": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "g": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, 
"sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}}}}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 3.872983346207417}}, "df": 1}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 4, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 3}}}}}}, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.func": {"tf": 1.4142135623730951}}, "df": 1, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 5}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}}, "df": 2, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 3}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 
1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 43}}}, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, 
"sqlglot.trie.in_trie": {"tf": 1}}, "df": 13, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}}, "df": 2}}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 14, "t": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 
1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, 
"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 68, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.group_by": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 7.745966692414834}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 27, "s": {"docs": {"sqlglot.diff": {"tf": 8.12403840463596}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 12}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 10, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.23606797749979}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 32}}, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}}, "df": 2, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}}, "df": 23, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, 
"sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Func": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.to_identifier": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, 
"df": 42, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}}, "df": 33}, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "\u00ef": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, 
"sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 50, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 7, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, 
"sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find_ancestor": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dataframe": {"tf": 3}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1.4142135623730951}}, "df": 49}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": 
{"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 44, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 2}}, "df": 39}}}}, "^": {"2": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}, "docs": {}, "df": 0}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 
1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 6}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 3}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}}, "df": 6}}}, "v": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}}, "df": 7, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 
1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 41}}}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}}, "df": 3, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}}, "df": 2}, "s": {"docs": {"sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 
1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 20}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "f": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 
1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 5, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 4.358898943540674}, 
"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 4.358898943540674}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 4.358898943540674}, "sqlglot.executor.python.Python.Generator": {"tf": 4.358898943540674}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 4.358898943540674}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 2.6457513110645907}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 49, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "s": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, 
"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 7}, "s": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.env.null_if_any": {"tf": 1}}, "df": 1}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}, "l": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 21, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, 
"sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.expressions.delete": {"tf": 2.23606797749979}}, "df": 1}}, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.transforms.delegate": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 
1}}, "df": 5}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}}, "df": 1}}}}}}}}}}, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.copy": {"tf": 1}}, "df": 1}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 50, "s": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe": {"tf": 
1.7320508075688772}, "sqlglot.dialects": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 7}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 11, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}}, "df": 2}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 29, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 2}}}, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 32}}}}}}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": 
{"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}}, "|": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 3}}}}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.tsort": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}}, "df": 6}}}}, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 3}, "m": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, 
"sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 4}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 4, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 5, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 4}}}}, "n": {"docs": {"sqlglot.executor": 
{"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 5, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 5}, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 2, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}}, "df": 7, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 3}, "sqlglot.expressions.Expression": {"tf": 1}, 
"sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 8}}}}, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.schema.Schema": {"tf": 1}}, "df": 6}}}}}}, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}}, "df": 5}, "y": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, 
"sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 3}}, "b": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2}}, "df": 9, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "y": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.872983346207417}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": 
{"tf": 2}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 2.23606797749979}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 
1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.7320508075688772}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, 
"sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 77, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 24, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 
0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 19}}}}}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 26}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 6}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 7, "/": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 8, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 3}}}}, "s": {"docs": {"sqlglot.helper.count_params": {"tf": 1}}, "df": 1}}}}}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "/": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 
1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 37}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 
1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 3.4641016151377544}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, 
"df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 8}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 
1.7320508075688772}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 24}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "l": 
{"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 2}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 6}}}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "x": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 2}}}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": 
{"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 3}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}}, "df": 6}}}}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 11}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 2}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 4, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, 
"sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 22}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 41}}, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 
3.1622776601683795}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.449489742783178}}, "df": 1}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 1}}, "df": 3}}}}, "s": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 3, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.898979485566356}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}}, "df": 28, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 3, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, 
"n": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "p": {"3": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, 
"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}}, "df": 23, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 2}}, "df": 1}}}}}}}}}, "n": {"docs": {"sqlglot.executor": {"tf": 2.8284271247461903}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}}, "df": 7}, "y": {"docs": {"sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}}}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "f": {"docs": 
{}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "h": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 18, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.transpile": 
{"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 3}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 2}}, "df": 6, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 11}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.expressions.expand": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 7}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 3}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}}, "df": 5, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, 
"sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.872983346207417}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}}, "df": 70, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}}, "df": 6}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 
1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 65}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.449489742783178}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 24, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 4.358898943540674}, "sqlglot.pretty": {"tf": 1}, "sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.parse_one": {"tf": 1.7320508075688772}, "sqlglot.transpile": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 3.7416573867739413}, "sqlglot.dataframe.sql.DataFrame.orderBy": 
{"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 
1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 3.7416573867739413}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 3.7416573867739413}, "sqlglot.diff": {"tf": 8.48528137423857}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 6.708203932499369}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 3.7416573867739413}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, 
"sqlglot.expressions.Expression.transform": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Unionable.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.group_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.Func": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2}, "sqlglot.expressions.update": {"tf": 
2.23606797749979}, "sqlglot.expressions.delete": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 2}, "sqlglot.expressions.or_": {"tf": 2}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.6457513110645907}, "sqlglot.expressions.subquery": {"tf": 2.23606797749979}, "sqlglot.expressions.cast": {"tf": 2.23606797749979}, "sqlglot.expressions.values": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 2}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 3.7416573867739413}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.7320508075688772}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 2}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.Scan.from_expression": {"tf": 2.23606797749979}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, 
"sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.7320508075688772}, "sqlglot.transforms.delegate": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 178, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 1.7320508075688772}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 24}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 21}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 
1.4142135623730951}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}}, "df": 2}}}}}}}, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 3}}}}}}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 4.242640687119285}, "sqlglot.schema": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 2}, "sqlglot.parse_one": {"tf": 2.6457513110645907}, "sqlglot.transpile": {"tf": 4}, 
"sqlglot.dataframe": {"tf": 4.358898943540674}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 3}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 2}, "sqlglot.dialects.drill.if_sql": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.6457513110645907}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 4.47213595499958}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 4.47213595499958}, "sqlglot.diff": {"tf": 15.066519173319364}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.diff": {"tf": 3.3166247903554}, "sqlglot.diff.ChangeDistiller": {"tf": 1.7320508075688772}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 8.48528137423857}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.7320508075688772}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 4.47213595499958}, 
"sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 3.1622776601683795}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.find_all": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, 
"sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.8284271247461903}, "sqlglot.expressions.Join.using": {"tf": 3}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 3.4641016151377544}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.sort_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.offset": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.select": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.lateral": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.join": {"tf": 3.3166247903554}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 2.8284271247461903}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 3}, "sqlglot.expressions.union": {"tf": 3.1622776601683795}, 
"sqlglot.expressions.intersect": {"tf": 3.1622776601683795}, "sqlglot.expressions.except_": {"tf": 3.1622776601683795}, "sqlglot.expressions.select": {"tf": 3}, "sqlglot.expressions.from_": {"tf": 3.3166247903554}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 3}, "sqlglot.expressions.and_": {"tf": 2.23606797749979}, "sqlglot.expressions.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.not_": {"tf": 2.23606797749979}, "sqlglot.expressions.to_identifier": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2.8284271247461903}, "sqlglot.expressions.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 2}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 3}, "sqlglot.generator.Generator": {"tf": 4.47213595499958}, "sqlglot.generator.Generator.generate": {"tf": 1.7320508075688772}, "sqlglot.helper.AutoName": {"tf": 1.4142135623730951}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1.7320508075688772}, "sqlglot.helper.subclasses": {"tf": 2.23606797749979}, "sqlglot.helper.apply_index_offset": {"tf": 2.449489742783178}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1.7320508075688772}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 
1}, "sqlglot.helper.split_num_words": {"tf": 2.6457513110645907}, "sqlglot.helper.is_iterable": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2.449489742783178}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser.parse": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.parse_into": {"tf": 2.8284271247461903}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.Scan.from_expression": {"tf": 2.449489742783178}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.449489742783178}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2.23606797749979}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.23606797749979}, "sqlglot.trie.in_trie": {"tf": 2.23606797749979}}, "df": 224, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, 
"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 16}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 2}}, "df": 4, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5}, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}}, "df": 7}, "i": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.expression": 
{"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 9}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 5.744562646538029}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 3.605551275463989}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 
1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.transforms.delegate": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 63}, "n": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, 
"sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot": {"tf": 4.58257569495584}, "sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 6.244997998398398}, "sqlglot.diff.diff": {"tf": 3.3166247903554}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, 
"sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 3}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.add_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 165}, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3, "t": {"docs": {"sqlglot.diff": 
{"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 4}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 7.280109889280518}, "sqlglot.diff.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 33, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 3.3166247903554}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 2.8284271247461903}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_table": {"tf": 2.449489742783178}, "sqlglot.expressions.to_column": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.table_": {"tf": 2}, "sqlglot.expressions.rename_table": {"tf": 2}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 2.6457513110645907}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 2.449489742783178}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_derived_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.column_names": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 2.449489742783178}, "sqlglot.schema.MappingSchema.add_table": {"tf": 2.23606797749979}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.7320508075688772}, 
"sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 81, "s": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 22, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 39}}}}}}, "|": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1}}}}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 3}, "n": {"docs": {"sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "g": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Tag": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "q": {"docs": 
{}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}}, "df": 29, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.time.format_time": {"tf": 2.23606797749979}}, "df": 27, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "b": {"docs": {}, "df": 0, "l": {"1": {"docs": {"sqlglot.expressions.Select.join": {"tf": 2}}, "df": 1}, "2": {"docs": {"sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.8284271247461903}}, "df": 3}, "docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.7320508075688772}}, "df": 25}}, "w": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 5}, "sqlglot.executor": {"tf": 1}}, "df": 5}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "y": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 2.23606797749979}, "sqlglot.dialects": {"tf": 3.1622776601683795}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 
1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.23606797749979}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 2}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1.4142135623730951}}, "df": 30, "s": {"docs": {"sqlglot.dataframe": {"tf": 5.656854249492381}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 21}, "o": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}}, "p": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 4, "[": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": 
{}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 3, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 11, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}}, "df": 9, "/": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, 
"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 10}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 12, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression": 
{"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}}, "df": 2}}, "[": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, 
"sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 81}}}}, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, 
"sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}}, "df": 40}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 3}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {"sqlglot": {"tf": 3.605551275463989}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 
1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 2.8284271247461903}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, 
"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 80, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 17, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 39}}}}}}, "g": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "/": {"3": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"4": {"3": {"3": {"9": {"2": {"3": {"0": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}}}}}}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 5}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.parse": {"tf": 1}, "sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 
3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 10.488088481701515}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 4.69041575982343}, "sqlglot.executor.execute": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, 
"sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_children": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 2}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.find_new_name": 
{"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 2}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 3.1622776601683795}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": 
{"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 150, "f": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 2.23606797749979}, "sqlglot.parser.Parser": {"tf": 
1.4142135623730951}}, "df": 21}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": 
{"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Literal.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Star.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Alias.output_name": {"tf": 2.23606797749979}, "sqlglot.expressions.Cast.output_name": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}}, "df": 34, "s": {"docs": {"sqlglot.optimizer.scope.Scope.selects": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}}, "df": 6}}}, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}}, "df": 3}}, "n": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.795831523312719}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 3}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 44, "e": {"docs": {"sqlglot": {"tf": 4.242640687119285}, "sqlglot.parse": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 2}, "sqlglot.expressions.Identifier.output_name": {"tf": 2}, "sqlglot.expressions.Literal.output_name": {"tf": 2}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 2}, "sqlglot.expressions.Star.output_name": {"tf": 2}, "sqlglot.expressions.Alias.output_name": {"tf": 2}, "sqlglot.expressions.Cast.output_name": {"tf": 2}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 64, "s": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 2}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": 
{"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, 
"sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 63}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 3}}, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "b": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 11, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 6}}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, 
"sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 44, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 19}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": 
{"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 5, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}, "w": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, 
"sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 10, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 4}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 3}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 24, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}, "v": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}}, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.AutoName": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.Dialects": {"tf": 1}, "sqlglot.errors.ErrorLevel": {"tf": 1}, "sqlglot.expressions.Properties.Location": {"tf": 1}, "sqlglot.expressions.DataType.Type": {"tf": 1}, "sqlglot.optimizer.scope.ScopeType": {"tf": 1}, "sqlglot.tokens.TokenType": {"tf": 1}}, "df": 6}}}}}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": 
{"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 11}}, "r": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "x": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1, "p": {"docs": {"sqlglot": {"tf": 4.123105625617661}, "sqlglot.dialects": {"tf": 3.3166247903554}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, 
"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}}, "df": 19, "r": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1}, "sqlglot.expressions.Expression.set": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Expression.find_all": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 2}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subqueryable.with_": {"tf": 3}, "sqlglot.expressions.Union.limit": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.from_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.group_by": {"tf": 3}, "sqlglot.expressions.Select.order_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.sort_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.limit": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.offset": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2}, 
"sqlglot.expressions.Select.join": {"tf": 3}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.lock": {"tf": 1.7320508075688772}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 2.8284271247461903}, "sqlglot.expressions.union": {"tf": 2.6457513110645907}, "sqlglot.expressions.intersect": {"tf": 2.6457513110645907}, "sqlglot.expressions.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.select": {"tf": 2}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.condition": {"tf": 2.8284271247461903}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 2.23606797749979}, "sqlglot.expressions.subquery": {"tf": 2.6457513110645907}, "sqlglot.expressions.column": {"tf": 1.4142135623730951}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 2}, "sqlglot.expressions.table_name": {"tf": 1}, 
"sqlglot.expressions.replace_tables": {"tf": 2.23606797749979}, "sqlglot.expressions.replace_placeholders": {"tf": 2.23606797749979}, "sqlglot.expressions.expand": {"tf": 2.23606797749979}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.helper.while_changing": {"tf": 2}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.8284271247461903}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.8284271247461903}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 2.23606797749979}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2.6457513110645907}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalize": {"tf": 2.6457513110645907}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.23606797749979}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2.6457513110645907}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.6457513110645907}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 2}, 
"sqlglot.optimizer.scope.Scope.replace": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 2}, "sqlglot.optimizer.simplify.simplify": {"tf": 2.6457513110645907}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.6457513110645907}, "sqlglot.parser.Parser.parse_into": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.expression": {"tf": 2.23606797749979}, "sqlglot.parser.Parser.validate_expression": {"tf": 2}, "sqlglot.planner.Step.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.Scan.from_expression": {"tf": 2.6457513110645907}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.6457513110645907}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 142, "s": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Literal.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Star.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Alias.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1.7320508075688772}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 2.23606797749979}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.selects": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1.4142135623730951}}, "df": 85}, "|": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression.replace": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 39}}}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.expand": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1}}}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, 
"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": 
{"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 87, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, 
"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 16}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 7}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 5, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 2}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 2}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 
1}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": 
{"tf": 1}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 19}}}}}, "t": {"docs": {"sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}}, "df": 7}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 2.449489742783178}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, 
"sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 2.449489742783178}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1.4142135623730951}}, "df": 27, "s": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.errors.ErrorLevel.IGNORE": {"tf": 1}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 27}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 
1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 39}}}}}}}}}, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, 
"df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 24}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": 
{"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 4.123105625617661}}, "df": 1, "s": {"docs": {"sqlglot.dataframe": {"tf": 3}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}}, "df": 9}}}, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 1, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2}, "sqlglot.generator.Generator": {"tf": 2}}, "df": 22}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.helper.first": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": 
{"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}}, "df": 3}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 1}}, "df": 1}}}}}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 9}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 3}}, "df": 1}}}, "u": {"docs": {}, "df": 0, ":": {"8": {"0": {"9": {"0": {"docs": {}, "df": 0, "/": {"1": {"1": {"5": {"docs": {}, "df": 0, "/": {"1": {"docs": {}, "df": 0, "/": {"1": {"9": {"9": {"5": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}, "q": {"docs": {}, "df": 
0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.convert": {"tf": 1}}, "df": 2}}}}}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 3}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}}, "c": {"docs": {"sqlglot": {"tf": 3.605551275463989}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Identifier.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Literal.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Star.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.Alias.output_name": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Cast.output_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 22, "a": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot": {"tf": 3.872983346207417}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, 
"sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 27, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 2.449489742783178}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.values": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.7320508075688772}}, "df": 13, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 5}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects": {"tf": 1}, 
"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}}, "df": 2}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, 
"sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}, "t": {"docs": {"sqlglot.trie.in_trie": {"tf": 2}}, "df": 1, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}}, "df": 7}}}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.helper.camel_to_snake_case": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": 
{}, "df": 0, "d": {"docs": {"sqlglot.expressions.func": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, 
"sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 36}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_correlated_subquery": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 3}}}}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "m": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}}, "df": 4, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": 
{"tf": 2.449489742783178}}, "df": 1}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "x": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2}}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}}, "df": 2}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 3.7416573867739413}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}}, "df": 25}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": 
{"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_cte": {"tf": 1}}, "df": 13, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 21, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 
0, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 4}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 40}}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "b": {"1": {"9": {"3": {"0": {"6": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}, "docs": {}, "df": 0}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "/": {"8": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}, "docs": {}, "df": 0}}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": 
{"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.transpile": {"tf": 1}}, "df": 1}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "x": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context.__init__": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 2}, "sqlglot.planner.Scan.from_expression": {"tf": 2}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2}}, "df": 27}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}}, "df": 5}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 2}}}}}, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 
1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 4}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}}, "df": 2}}}}}}}}, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 2}}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Join.using": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects": 
{"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 2, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 
1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 19}}}}, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 1}}}}}}}, "o": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, 
"sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 10, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 8}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 4}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.convert": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}}}}}, "j": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 2}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, 
"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, 
"sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 66}}, "l": {"1": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "2": {"docs": {"sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}}, "df": 3}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 3.1622776601683795}, "sqlglot.executor.execute": {"tf": 1.7320508075688772}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}}, "df": 7, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}, 
"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2.6457513110645907}}, "df": 3}, "b": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2}}, "df": 2}, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 2.449489742783178}, "sqlglot.expressions.column": {"tf": 2}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.Schema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.get_column_type": {"tf": 2}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 2}}, "df": 58, "s": {"docs": {"sqlglot": {"tf": 
1}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 26}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.execute": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.parse": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, 
"sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1.4142135623730951}}, "df": 6, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 3, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 
1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, 
"sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}}, "df": 24}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 
1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 26, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 8, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3}}}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}}, "df": 6, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, 
"sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 8}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.6457513110645907}, 
"sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.6457513110645907}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 2.6457513110645907}, "sqlglot.generator.Generator": {"tf": 2.6457513110645907}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 23, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 40}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.assert_is": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": 
{"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 2}}}}, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 2}}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 9}}, "x": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Select.ctas": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 2}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.transforms.delegate": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.object_to_dict": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 24}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "a": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "f": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 3.3166247903554}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 4}}}}}}, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 
1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 2}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2}}, "df": 22, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4}}}}, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, 
"df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.Select.cluster_by": {"tf": 2}}, "df": 1}}}}}, "s": {"docs": {"sqlglot.helper.count_params": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1, "v": {"docs": {"sqlglot.helper.csv": {"tf": 1.4142135623730951}, "sqlglot.helper.csv_reader": {"tf": 2}}, "df": 2}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}, "i": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 3}}}, "b": {"docs": {"sqlglot": {"tf": 4.795831523312719}, "sqlglot.diff": {"tf": 3.7416573867739413}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1.4142135623730951}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 
1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.flatten": {"tf": 2}, "sqlglot.optimizer.simplify.uniq_sort": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.absorb_and_eliminate": {"tf": 3.1622776601683795}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 35, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 3}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.242640687119285}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.while_changing": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.7320508075688772}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 97, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 6}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, 
"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 3}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}}}}}}, "f": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 23}}}}, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 
1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 26}}}, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 6}}, "y": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 
2.449489742783178}, "sqlglot.dialects.bigquery": {"tf": 1}}, "df": 3}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 3.872983346207417}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 14, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 4}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "s": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 9}}, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2}}, "df": 2}}}, "g": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 2}, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 5}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.pretty": {"tf": 1}, "sqlglot.schema": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, 
"sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 4.47213595499958}, "sqlglot.diff.ChangeDistiller": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.7320508075688772}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1.4142135623730951}}, "df": 79, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate_tuple": {"tf": 1}}, "df": 1, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {"sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1.7320508075688772}}, "df": 2}}}}}, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, 
"df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}}, "df": 3}, "r": {"docs": {"sqlglot": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 2}, "sqlglot.expressions.alias_": {"tf": 2}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2}}, "df": 5, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 15, "d": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 
1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 6}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.449489742783178}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 2.449489742783178}, 
"sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.449489742783178}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, 
"sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 58, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 4}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.helper.seq_get": {"tf": 1}}, "df": 1}}}}, "b": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.bfs": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 3}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}}, "df": 2}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 9, "n": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.expressions.not_": {"tf": 1.4142135623730951}}, "df": 1}}, "b": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}}, "df": 5}}}, "u": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.7320508075688772}}, "df": 2, "s": 
{"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, 
"sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 84, "d": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.schema": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, 
"sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 1.4142135623730951}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, 
"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 68}, "s": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 4}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 7}}}, "r": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 3, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 5}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 2}, "sqlglot.expressions.Select.join": {"tf": 2}, "sqlglot.lineage.LineageHTML": {"tf": 1}, 
"sqlglot.transforms.preprocess": {"tf": 1}}, "df": 10}}}, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 23, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 
2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}}, "df": 22, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}}, "r": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.trie.in_trie": {"tf": 1}}, "df": 1}}}}}}}}}}, "i": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.TimeUnit": {"tf": 1}}, "df": 3}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 3}}}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 2.23606797749979}, "sqlglot.expressions.Union.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.is_union": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}}, "df": 1}, "/": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "q": {"docs": {"sqlglot.optimizer.simplify.uniq_sort": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 3}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, 
"t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 4}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}, "f": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 1}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}}, "df": 3}}}}}, "p": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 5, "p": {"docs": {}, 
"df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.diff": {"tf": 
1.4142135623730951}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Update": {"tf": 1}}, "df": 2}}}}}}, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "d": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}}, "df": 1}}}}, "f": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 4.69041575982343}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 5, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 3.4641016151377544}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 2.8284271247461903}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, 
"sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 6.855654600401044}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.errors.SqlglotError": {"tf": 1}, "sqlglot.errors.UnsupportedError": {"tf": 1}, "sqlglot.errors.ParseError": {"tf": 1}, "sqlglot.errors.TokenError": {"tf": 1}, "sqlglot.errors.OptimizeError": {"tf": 1}, "sqlglot.errors.SchemaError": {"tf": 1}, "sqlglot.errors.ExecuteError": {"tf": 1}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 2.23606797749979}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2.6457513110645907}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.Schema": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 81, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}}, "df": 10, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}}, "df": 26, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.dialect.format_time_lambda": 
{"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.csv": {"tf": 1}}, "df": 23}}}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}}, "df": 2}}, "l": {"docs": {"sqlglot.helper.count_params": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, 
"df": 3}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 14}, "u": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 8, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, 
"sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 12}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 3}}}}, "k": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot": {"tf": 6.557438524302}, "sqlglot.dataframe": {"tf": 4.58257569495584}, "sqlglot.dialects": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 
1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 4.358898943540674}, "sqlglot.executor": {"tf": 4}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 2}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.group_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.order_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.sort_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 
2}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.union": {"tf": 2.23606797749979}, "sqlglot.expressions.intersect": {"tf": 2.23606797749979}, "sqlglot.expressions.except_": {"tf": 2.23606797749979}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.replace_placeholders": {"tf": 1.7320508075688772}, "sqlglot.expressions.expand": {"tf": 2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 2}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, 
"sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.Scope.branch": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.Scan.from_expression": {"tf": 1.7320508075688772}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 116, "s": {"docs": {"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": 
{"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4, "c": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.func": {"tf": 2}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 5, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.func": {"tf": 2.23606797749979}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.is_udtf": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 36, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, 
"sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 30}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dataframe.sql.Column.cast": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}}}}}}}}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}}, "df": 3, "y": {"docs": {"sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}}, "df": 2}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"6": {"4": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1.7320508075688772}}, "df": 2}, "docs": {}, "df": 0}, "docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects": {"tf": 1}}, "df": 2}}, "r": {"docs": {}, "df": 0, "\u00e9": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, 
"df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.helper.flatten": {"tf": 1.4142135623730951}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "s": {"docs": {"sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}}, "df": 6}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "g": {"docs": {"sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}}, "df": 6}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.449489742783178}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}}, "df": 9, "/": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, 
"df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 15}}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}}, "l": 
{"docs": {"sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}}, "df": 1}}, "g": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "x": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3.1622776601683795}, "sqlglot.dataframe": {"tf": 3.4641016151377544}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 
1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 72}}, "l": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 
2}}}}}}}, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2}}, "df": 1}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3, "i": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.Predicate": {"tf": 1}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.to_interval": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}}, "df": 9}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, 
"sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}, "/": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "l": {"docs": {}, 
"df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.lineage.lineage": {"tf": 2}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.PythonExecutor.generate": {"tf": 1}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}}, "df": 8, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 2}}}}}, "t": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.transpile": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 2}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, "sqlglot.helper.ensure_list": {"tf": 2}, "sqlglot.helper.ensure_collection": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 2.8284271247461903}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}, 
"sqlglot.parser.Parser.parse": {"tf": 2}, "sqlglot.parser.Parser.parse_into": {"tf": 2}, "sqlglot.parser.Parser.raise_error": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}}, "df": 53, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "[": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 2}}, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}}, "df": 10}}}}}}, "b": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}}, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 2.23606797749979}, 
"sqlglot.expressions.Select.limit": {"tf": 2.23606797749979}}, "df": 4, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}}, "df": 1}}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "o": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.errors.ErrorLevel.WARN": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.dialects": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": 
{"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}}, "df": 4}}}}, "s": {"docs": {"sqlglot.parser.Parser.check_errors": {"tf": 1}}, "df": 1}}, "w": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1.7320508075688772}}, "df": 22}}}, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1}}, "df": 3}, "o": 
{"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}, "p": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 10}}, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, 
"sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 44}, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 21}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.7320508075688772}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 5.196152422706632}}, "df": 1}}}, "f": {"docs": {"sqlglot.diff": {"tf": 6.324555320336759}, "sqlglot.expressions.Expression.text": {"tf": 1}}, "df": 2}, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}}, "df": 4}}}, "n": {"docs": {"sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 2, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}}, "df": 24}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "s": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "x": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}}, "df": 4}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, 
"df": 2, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42, "r": {"docs": {"sqlglot.expressions.condition": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, 
"sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 41}}, "b": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "w": {"docs": {"sqlglot.optimizer.simplify.simplify_not": {"tf": 1}}, "df": 1}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": 
{"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 8, "/": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}}, "df": 1}}}}}}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "e": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 2, "a": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 8, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1.4142135623730951}}, "df": 1}}, "c": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.helper.csv_reader": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot": {"tf": 1.7320508075688772}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": 
{"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "d": {"docs": {"sqlglot.helper.while_changing": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 4, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.env.null_if_any": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 3}}}}}}, "f": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1.4142135623730951}, 
"sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 6}, "d": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}}, "df": 5}}}}}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "c": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}}, "df": 4, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 5}}}}}}}}, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}}}, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 1}}}}}, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, 
"t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 10, "s": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.alias": {"tf": 1}, "sqlglot.expressions.Expression.copy": {"tf": 1}, "sqlglot.expressions.Expression.depth": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.parent_select": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.unnest": {"tf": 1}, "sqlglot.expressions.Expression.unalias": {"tf": 1}, "sqlglot.expressions.Expression.unnest_operands": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.replace": 
{"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.expressions.Subquery.unnest": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.from_": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.expressions.delete": {"tf": 1}, "sqlglot.expressions.condition": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, 
"sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.column": {"tf": 1}, "sqlglot.expressions.cast": {"tf": 1}, "sqlglot.expressions.table_": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.expressions.column_table_names": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.4142135623730951}, "sqlglot.expressions.true": {"tf": 1}, "sqlglot.expressions.false": {"tf": 1}, "sqlglot.expressions.null": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.csv": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.find_new_name": {"tf": 1}, "sqlglot.helper.object_to_dict": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.helper.is_iterable": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.helper.first": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, 
"sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.Scope.tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ctes": {"tf": 1}, "sqlglot.optimizer.scope.Scope.derived_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.Scope.external_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.unqualified_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, 
"sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, "sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.tokens.Tokenizer.tokenize": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 161}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, 
"sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Expression.load": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.to_table": {"tf": 1}, "sqlglot.expressions.to_column": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.serde.load": {"tf": 1}}, "df": 38}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}}, "df": 3}}}}}}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, 
"sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.simplify.simplify": {"tf": 1}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 12}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}}}, "p": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 3}}, "s": {"docs": {"sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": 
{"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 25}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4}}}}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 
1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 1}}, "df": 10, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}}, "df": 2}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 3}}}}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.Column.cast": {"tf": 1}}, "df": 1}}}}}}}, "m": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.pop": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1}, "sqlglot.optimizer.scope.Scope.remove_source": {"tf": 1}}, "df": 12, "d": {"docs": {"sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 4}, "s": {"docs": {"sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.transforms.remove_precision_parameterized_types": {"tf": 1}}, "df": 3}}, "i": {"docs": {}, "df": 0, "n": 
{"docs": {}, "df": 0, "g": {"docs": {"sqlglot.optimizer.simplify.remove_compliments": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 2}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Predicate": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": 
{"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"1": {"docs": 
{"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}, "docs": {"sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.helper.camel_to_snake_case": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 10, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}}, "df": 5}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "n": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}}, "df": 1}}}}, "m": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}}, "df": 8}}}, "t": {"docs": 
{"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.helper.count_params": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "g": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1}}, "df": 4, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}}, "df": 1}}}}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.helper.flatten": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "d": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.4142135623730951}}, "df": 3}}}}}, "u": {"docs": {}, "df": 0, "c": 
{"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.expressions.rename_table": {"tf": 1}, "sqlglot.optimizer.scope.Scope.rename_source": {"tf": 1}}, "df": 2}}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "w": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "o": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "w": {"docs": {"sqlglot.dataframe": {"tf": 2.449489742783178}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.is_root": {"tf": 1}, "sqlglot.optimizer.scope.build_scope": {"tf": 1}}, "df": 4}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 
1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.errors.ErrorLevel.RAISE": {"tf": 1}, "sqlglot.errors.ErrorLevel.IMMEDIATE": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 43, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1}, "sqlglot.parser.Parser.check_errors": {"tf": 1}, "sqlglot.parser.Parser.raise_error": {"tf": 1}}, "df": 5}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, 
"sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 39}}}}, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 2}}, "k": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, 
"df": 1}}}, "i": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "j": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "w": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 3}}, "u": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}}, "df": 6, "n": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 2}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}}, "df": 3}}}}, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimizer.optimize": {"tf": 2}}, "df": 3}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "\u00e9": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.expressions.union": 
{"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 4}}}}}, "w": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 21}}, "o": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 2.6457513110645907}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.if_sql": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 4.58257569495584}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}, "sqlglot.expressions.subquery": {"tf": 1}, "sqlglot.expressions.values": {"tf": 1}, "sqlglot.expressions.replace_children": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope.replace": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}, "sqlglot.parser.Parser.expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}, "sqlglot.tokens.Token.number": {"tf": 1}, "sqlglot.tokens.Token.string": {"tf": 1}, 
"sqlglot.tokens.Token.identifier": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}}, "df": 47, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.helper.count_params": {"tf": 1}}, "df": 6}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.executor.context.Context.__init__": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 5}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 2}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, 
"sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.walk": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.expressions.Func": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.transforms.unalias_group": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1}}, "df": 70}}, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.transforms.eliminate_distinct_on": {"tf": 1.4142135623730951}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.8284271247461903}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.context.Context": {"tf": 1}}, "df": 5}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 2.23606797749979}, 
"sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 2.23606797749979}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 2.23606797749979}, "sqlglot.diff": {"tf": 4}, "sqlglot.diff.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 2.23606797749979}, "sqlglot.expressions": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 2.23606797749979}, "sqlglot.lineage.lineage": {"tf": 1}, 
"sqlglot.optimizer.normalize.normalization_distance": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}}, "df": 49}}, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2}, "sqlglot.expressions.update": {"tf": 2}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.7320508075688772}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2}, "sqlglot.trie.in_trie": {"tf": 1.4142135623730951}}, "df": 18}}, "n": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.renametable_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, 
"sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 3.4641016151377544}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.context.Context": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 51}, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.is_string": {"tf": 1}, "sqlglot.expressions.Expression.is_number": {"tf": 1}, "sqlglot.expressions.Expression.is_int": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, 
"sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 34}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}}}, "o": {"docs": {"sqlglot.expressions.Expression.flatten": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot.executor": {"tf": 2}}, "df": 1}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 5}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2}}, "df": 4}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}, "p": {"docs": {"sqlglot.expressions.Condition.not_": {"tf": 1}, "sqlglot.expressions.not_": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1.4142135623730951}}, "df": 11}}}, "s": {"docs": {"sqlglot.helper.ensure_list": {"tf": 1}, "sqlglot.helper.ensure_collection": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, 
"sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 22}, "e": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 4, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2.6457513110645907}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 6, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "l": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 2.23606797749979}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 2.449489742783178}, "sqlglot.diff": {"tf": 5.5677643628300215}, "sqlglot.executor": {"tf": 
2.23606797749979}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_cnf": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_dnf": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 15, "l": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1.4142135623730951}}, "df": 3}}, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "\u2019": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 5, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1}, "r": {"docs": 
{}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 2.8284271247461903}}, "df": 2}}, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}, "k": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.expressions.TimeUnit": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.tokens.Token.var": {"tf": 1.4142135623730951}}, "df": 3, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}}}}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, 
"sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 2.23606797749979}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 1}, "s": {"docs": {"sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 1}}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.append": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.set": {"tf": 1.7320508075688772}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.expressions.convert": {"tf": 1.4142135623730951}, "sqlglot.helper.AutoName": {"tf": 1}, "sqlglot.helper.seq_get": {"tf": 1}, "sqlglot.helper.ensure_list": {"tf": 2.23606797749979}, "sqlglot.helper.ensure_collection": {"tf": 2.23606797749979}, "sqlglot.helper.split_num_words": {"tf": 2}, "sqlglot.helper.is_iterable": {"tf": 2}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1.7320508075688772}}, "df": 16, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, 
"sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}, "sqlglot.expressions.values": {"tf": 2.8284271247461903}, "sqlglot.helper.split_num_words": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 11}}}}}, "e": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}}, "df": 2, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4}, "s": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "a": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "c": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1}}, "df": 2}}}}, "s": {"docs": {"sqlglot.executor.context.Context": {"tf": 1.4142135623730951}}, "df": 1}}}}}}, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 
1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "i": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "z": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.expressions.Expression.dfs": {"tf": 1}, "sqlglot.expressions.Expression.bfs": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 7}}, "b": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}}, "df": 3}}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, 
"e": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.expressions.Select.lateral": {"tf": 1}}, "df": 1}}}, "\u00e4": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 2.23606797749979}, "sqlglot.schema.AbstractMappingSchema": {"tf": 2.23606797749979}}, "df": 2}}, "g": {"docs": {"sqlglot.schema": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.schema.Schema.supported_table_args": {"tf": 1}}, "df": 6, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}}, "df": 5, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 2.23606797749979}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.dfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.bfs": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.transforms.preprocess": {"tf": 1.4142135623730951}, "sqlglot.transforms.delegate": {"tf": 1.4142135623730951}}, "df": 38}}, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 1.4142135623730951}}, 
"df": 3, "d": {"docs": {"sqlglot.pretty": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "s": {"docs": {"sqlglot.generator.Generator.generate": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.expressions.Tag": {"tf": 1}}, "df": 5}}, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Func": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1}}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.table_name": {"tf": 1}, "sqlglot.helper.dict_depth": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.scope.Scope.source_columns": {"tf": 1}, "sqlglot.schema.Schema.column_names": {"tf": 1}, "sqlglot.schema.Schema.get_column_type": {"tf": 1}, "sqlglot.schema.MappingSchema.column_names": {"tf": 1}, "sqlglot.schema.MappingSchema.get_column_type": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 14, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}}}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 2}, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, 
"m": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "o": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2}}}}, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "m": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "t": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 5.656854249492381}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 2.449489742783178}, "sqlglot.expressions.Condition.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 
2.449489742783178}, "sqlglot.expressions.Unionable.intersect": {"tf": 2.449489742783178}, "sqlglot.expressions.Unionable.except_": {"tf": 2.449489742783178}, "sqlglot.expressions.Column.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Identifier.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Literal.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Join.on": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Union.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.group_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.order_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.sort_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.cluster_by": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.limit": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.offset": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lateral": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.join": {"tf": 3}, "sqlglot.expressions.Select.where": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.having": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.distinct": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.ctas": {"tf": 1.7320508075688772}, "sqlglot.expressions.Select.lock": {"tf": 2.449489742783178}, "sqlglot.expressions.Subquery.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Star.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 3.4641016151377544}, "sqlglot.expressions.Cast.output_name": {"tf": 3.4641016151377544}, 
"sqlglot.expressions.maybe_parse": {"tf": 2.449489742783178}, "sqlglot.expressions.union": {"tf": 1.7320508075688772}, "sqlglot.expressions.intersect": {"tf": 1.7320508075688772}, "sqlglot.expressions.except_": {"tf": 1.7320508075688772}, "sqlglot.expressions.select": {"tf": 1.7320508075688772}, "sqlglot.expressions.from_": {"tf": 1.7320508075688772}, "sqlglot.expressions.update": {"tf": 2.23606797749979}, "sqlglot.expressions.delete": {"tf": 2.23606797749979}, "sqlglot.expressions.condition": {"tf": 3.4641016151377544}, "sqlglot.expressions.and_": {"tf": 1.7320508075688772}, "sqlglot.expressions.or_": {"tf": 1.7320508075688772}, "sqlglot.expressions.not_": {"tf": 1.7320508075688772}, "sqlglot.expressions.alias_": {"tf": 2.449489742783178}, "sqlglot.expressions.subquery": {"tf": 1.7320508075688772}, "sqlglot.expressions.cast": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1.7320508075688772}, "sqlglot.expressions.column_table_names": {"tf": 2.449489742783178}, "sqlglot.expressions.table_name": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 2.449489742783178}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3}, "sqlglot.helper.is_iterable": {"tf": 2.449489742783178}, "sqlglot.helper.flatten": {"tf": 2.449489742783178}, "sqlglot.helper.dict_depth": {"tf": 3.872983346207417}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 4}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 3.4641016151377544}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3.872983346207417}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 3.4641016151377544}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 
2.449489742783178}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 3}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 3}, "sqlglot.optimizer.normalize.normalize": {"tf": 3}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 3}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 3.4641016151377544}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 3.4641016151377544}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 3}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 3.872983346207417}, "sqlglot.optimizer.simplify.simplify": {"tf": 3}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 3}, "sqlglot.time.format_time": {"tf": 1.7320508075688772}, "sqlglot.transforms.unalias_group": {"tf": 2.449489742783178}, "sqlglot.trie.new_trie": {"tf": 1.7320508075688772}, "sqlglot.trie.in_trie": {"tf": 3}}, "df": 93}, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 2.6457513110645907}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}, "sqlglot.transforms.unalias_group": {"tf": 2}}, "df": 13, "b": {"docs": {}, "df": 0, "y": {"docs": 
{"sqlglot.dataframe": {"tf": 2.23606797749979}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "p": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1.7320508075688772}, "sqlglot.lineage.lineage": {"tf": 1}}, "df": 3}}, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 1}}}}}}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "n": {"docs": {"sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.generator.Generator.generate": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.helper.while_changing": {"tf": 1}, "sqlglot.helper.tsort": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.time.format_time": {"tf": 1}}, "df": 40}}}, "t": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1, "h": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "z": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.helper.open_file": {"tf": 1}}, "df": 1}}}}, "q": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}, "c": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 2.8284271247461903}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}}, "df": 6}, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "y": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, 
"sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 26, "b": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "u": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 14.594519519326424}, "sqlglot.dataframe": {"tf": 10.392304845413264}, "sqlglot.dialects": {"tf": 5.385164807134504}, "sqlglot.diff": {"tf": 5.830951894845301}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.executor": {"tf": 3.7416573867739413}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.assert_is": {"tf": 2}, "sqlglot.expressions.Condition.and_": {"tf": 2}, "sqlglot.expressions.Condition.or_": {"tf": 2}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Unionable.union": {"tf": 2}, "sqlglot.expressions.Unionable.intersect": {"tf": 2}, "sqlglot.expressions.Unionable.except_": {"tf": 2}, "sqlglot.expressions.Column.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Identifier.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Literal.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2}, "sqlglot.expressions.Join.using": {"tf": 2.449489742783178}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2.449489742783178}, 
"sqlglot.expressions.Subqueryable.with_": {"tf": 2.8284271247461903}, "sqlglot.expressions.Union.limit": {"tf": 2}, "sqlglot.expressions.Select.from_": {"tf": 2}, "sqlglot.expressions.Select.group_by": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.order_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.sort_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.cluster_by": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.limit": {"tf": 2}, "sqlglot.expressions.Select.offset": {"tf": 2}, "sqlglot.expressions.Select.select": {"tf": 2}, "sqlglot.expressions.Select.lateral": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.join": {"tf": 5.477225575051661}, "sqlglot.expressions.Select.where": {"tf": 2.8284271247461903}, "sqlglot.expressions.Select.having": {"tf": 3.1622776601683795}, "sqlglot.expressions.Select.distinct": {"tf": 2}, "sqlglot.expressions.Select.ctas": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.lock": {"tf": 4.47213595499958}, "sqlglot.expressions.Subquery.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Star.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Alias.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.Cast.output_name": {"tf": 2.449489742783178}, "sqlglot.expressions.maybe_parse": {"tf": 2}, "sqlglot.expressions.union": {"tf": 2}, "sqlglot.expressions.intersect": {"tf": 2}, "sqlglot.expressions.except_": {"tf": 2}, "sqlglot.expressions.select": {"tf": 2.449489742783178}, "sqlglot.expressions.from_": {"tf": 2.449489742783178}, "sqlglot.expressions.update": {"tf": 4}, "sqlglot.expressions.delete": {"tf": 2}, "sqlglot.expressions.condition": {"tf": 3.1622776601683795}, "sqlglot.expressions.and_": {"tf": 2.449489742783178}, "sqlglot.expressions.or_": {"tf": 2.449489742783178}, "sqlglot.expressions.not_": {"tf": 2}, "sqlglot.expressions.values": {"tf": 1.4142135623730951}, "sqlglot.expressions.column_table_names": {"tf": 1.4142135623730951}, 
"sqlglot.expressions.table_name": {"tf": 1.4142135623730951}, "sqlglot.expressions.replace_tables": {"tf": 2.449489742783178}, "sqlglot.expressions.replace_placeholders": {"tf": 2.8284271247461903}, "sqlglot.expressions.expand": {"tf": 2.449489742783178}, "sqlglot.expressions.func": {"tf": 2.449489742783178}, "sqlglot.helper.split_num_words": {"tf": 3.4641016151377544}, "sqlglot.helper.is_iterable": {"tf": 1.4142135623730951}, "sqlglot.helper.flatten": {"tf": 1.4142135623730951}, "sqlglot.helper.dict_depth": {"tf": 3.1622776601683795}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 3.1622776601683795}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 2}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.normalization_distance": {"tf": 1.4142135623730951}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1.4142135623730951}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 2.8284271247461903}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 2}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify": {"tf": 1.4142135623730951}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.time.format_time": {"tf": 
2.449489742783178}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2.449489742783178}, "sqlglot.trie.in_trie": {"tf": 3.4641016151377544}}, "df": 87, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 25, "d": {"docs": 
{"sqlglot": {"tf": 3}, "sqlglot.diff.diff": {"tf": 2}, "sqlglot.expressions.maybe_parse": {"tf": 1}, "sqlglot.expressions.to_identifier": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1.4142135623730951}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 6}, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.generator.Generator": 
{"tf": 1.4142135623730951}}, "df": 25}}}, "k": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}}, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 6, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.lineage.lineage": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1.4142135623730951}, "sqlglot.optimizer.qualify_columns.validate_qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1.4142135623730951}}, "df": 4}}}}}}}}}, "y": {"docs": {"sqlglot": {"tf": 3.3166247903554}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 3.4641016151377544}, "sqlglot.expressions.Expression.replace": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1.4142135623730951}, "sqlglot.expressions.Join.on": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 
3.1622776601683795}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Kwarg": {"tf": 1}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 2}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 3}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.8284271247461903}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.8284271247461903}, "sqlglot.planner.Step.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.Scan.from_expression": {"tf": 2.8284271247461903}, "sqlglot.planner.SetOperation.from_expression": {"tf": 2.8284271247461903}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 39, "o": {"docs": {}, "df": 0, "u": {"docs": {"sqlglot": {"tf": 2.23606797749979}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 2}, "sqlglot.executor": {"tf": 2}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}}, "df": 7, "r": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 4}}}, "y": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "y": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1}, "sqlglot.expressions.Expression.flatten": {"tf": 1}, "sqlglot.helper.csv_reader": {"tf": 1}, "sqlglot.helper.flatten": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.Scope.traverse": {"tf": 1}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}}, "df": 8}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "e": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot": {"tf": 1}}, "df": 1, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}}}, "e": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, 
"v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.dialects.dialect.create_with_partitions_sql": {"tf": 1}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}}, "df": 7}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "y": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.diff": {"tf": 3.7416573867739413}}, "df": 1, "g": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}}, "df": 1}}}}}}}}, "m": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1.4142135623730951}}, "df": 1}}}}, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 4, "e": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3}}}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}, "i": {"docs": {}, "df": 0, 
"n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.executor.context.Context": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}}, "df": 2}, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 18}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "p": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "q": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "o": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}}}, "l": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.expressions.values": {"tf": 1}}, "df": 3, "e": {"docs": {}, "df": 0, "r": {"docs": 
{"sqlglot.dialects.dialect.format_time_lambda": {"tf": 1}, "sqlglot.expressions": {"tf": 1}}, "df": 2, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "f": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.expressions.condition": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}}, "df": 4}}}}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}}}}}}}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 2}}, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.with_properties": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 4.69041575982343}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.qualify_columns.qualify_columns": {"tf": 1}, "sqlglot.optimizer.qualify_tables.qualify_tables": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}}, "df": 9, "n": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.executor": {"tf": 2}, "sqlglot.expressions.Select.having": {"tf": 
1.7320508075688772}}, "df": 2}}}}, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.diff.Insert": {"tf": 1}, "sqlglot.diff.Remove": {"tf": 1}, "sqlglot.diff.Move": {"tf": 1}, "sqlglot.diff.Update": {"tf": 1}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Expression.output_name": {"tf": 1}, "sqlglot.expressions.Column.output_name": {"tf": 1}, "sqlglot.expressions.Identifier.output_name": {"tf": 1}, "sqlglot.expressions.Literal.output_name": {"tf": 1}, "sqlglot.expressions.Subquery.output_name": {"tf": 1}, "sqlglot.expressions.Star.output_name": {"tf": 1}, "sqlglot.expressions.Alias.output_name": {"tf": 1}, "sqlglot.expressions.Cast.output_name": {"tf": 1}, "sqlglot.expressions.alias_": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 19, "n": {"docs": {"sqlglot.diff.Keep": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2, "\u2019": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2}, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.union": {"tf": 1.4142135623730951}, "sqlglot.expressions.intersect": {"tf": 1.4142135623730951}, "sqlglot.expressions.except_": {"tf": 1.4142135623730951}}, "df": 3, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1}}, "df": 2}}, "w": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": 
{"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "f": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "p": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "u": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "\u2019": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "t": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "x": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 1}}}}}}}}}}, "d": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1}}}, "e": {"docs": {}, "df": 0, "v": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "j": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1}}}}}}}}}, ":": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "/": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "p": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "b": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 
1}}}}}}}}}}}, "m": {"docs": {"sqlglot.expressions.Matches": {"tf": 1}}, "df": 1, "l": {"docs": {"sqlglot.dataframe.sql.DataFrame.persist": {"tf": 1}, "sqlglot.lineage.LineageHTML": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 3}}}}, "m": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 3, "a": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.values_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1}}, "df": 9, "b": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}}, "df": 1}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 2}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot.dialects": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.env.null_if_any": {"tf": 1}, "sqlglot.optimizer.lower_identities.lower_identities": {"tf": 1}}, "df": 4}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}, "p": {"docs": {"sqlglot.expressions.Func": {"tf": 1}}, "df": 1, "p": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 
1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1.4142135623730951}, "sqlglot.executor.table.Tables": {"tf": 1.7320508075688772}, "sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.replace_tables": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.4142135623730951}, "sqlglot.lineage.lineage": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.scope.Scope": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.cte_sources": {"tf": 1}, 
"sqlglot.optimizer.scope.Scope.ref_count": {"tf": 1}, "sqlglot.schema.Schema.add_table": {"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema": {"tf": 1.7320508075688772}, "sqlglot.schema.MappingSchema.add_table": {"tf": 1.7320508075688772}, "sqlglot.time.format_time": {"tf": 1.4142135623730951}}, "df": 39, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 22}, "[": {"docs": {}, "df": 0, "k": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": 
{"sqlglot.expressions.replace_tables": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}}, "df": 2}}}, "s": {"docs": {"sqlglot.expressions.Expression": {"tf": 1}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 5}}, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 3, "t": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 6}, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.parser.Parser.validate_expression": {"tf": 1}}, "df": 2}}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, 
"sqlglot.diff": {"tf": 2}, "sqlglot.expressions.Expression.find": {"tf": 1}, "sqlglot.expressions.Expression.find_all": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find_all": {"tf": 1.4142135623730951}}, "df": 7, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.values_sql": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.expressions.Expression.find": {"tf": 1.4142135623730951}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1.4142135623730951}}, "df": 6}, "d": {"docs": {"sqlglot.diff": {"tf": 2.23606797749979}}, "df": 1}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.diff": {"tf": 7.14142842854285}, "sqlglot.expressions.Expression.find_ancestor": {"tf": 1}, "sqlglot.expressions.Matches": {"tf": 1}, "sqlglot.optimizer.scope.Scope.find": {"tf": 1}}, "df": 4, "s": {"docs": {"sqlglot.diff": {"tf": 3}}, "df": 1}}}}}}, "t": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "i": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "h": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "x": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1.7320508075688772}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1.7320508075688772}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, 
"sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1.7320508075688772}, "sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor.python.Python.Generator": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.normalize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 42, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, 
"sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}}, "df": 39}}, "a": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot.optimizer.normalize.normalize": {"tf": 1}}, "df": 1}}}}}, "r": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.diff.ChangeDistiller": {"tf": 1}}, "df": 2, "e": {"docs": {}, "df": 0, "z": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "e": {"docs": {"sqlglot.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 2, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "a": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dataframe": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "l": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "h": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, 
"sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.canonicalize.canonicalize": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 45, "s": {"docs": 
{"sqlglot.helper.count_params": {"tf": 1}, "sqlglot.transforms.delegate": {"tf": 1}}, "df": 2}}}}, "r": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}}, "df": 1}}}}}, "s": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1.4142135623730951}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser": {"tf": 1.4142135623730951}}, "df": 18, "s": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Parser": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Parser": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Parser": {"tf": 1}, 
"sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Parser": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Parser": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Parser": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Parser": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Parser": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Parser": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Parser": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Parser": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Parser": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Parser": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Parser": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Parser": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Parser": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression.error_messages": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.parser.Parser": {"tf": 1}, "sqlglot.parser.Parser.parse": {"tf": 1}, "sqlglot.parser.Parser.parse_into": {"tf": 
1}}, "df": 43}}}}}}, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}}, "df": 1, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}}, "df": 1, "s": {"docs": {"sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 1}}}}}, "o": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "f": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}}, "df": 21}, "i": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": 
{"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.Expression.transform": {"tf": 1}}, "df": 2}}}}}}}, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.expressions.Expression.transform": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.distinct": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}}, "df": 18}}}}}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.expressions": {"tf": 1}, "sqlglot.helper.subclasses": {"tf": 1.4142135623730951}}, "df": 4}}}, "e": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": 
{"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.helper.open_file": {"tf": 1}}, "df": 23, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dataframe": {"tf": 1}}, "df": 1}}}}}, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.to_interval": {"tf": 1}}, "df": 1}}}, "p": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "v": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.1622776601683795}, "sqlglot.diff.diff": {"tf": 1}}, "df": 3, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2}}, "df": 1}}}}}, "d": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}}, "df": 1}}}, "z": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "r": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.6457513110645907}, "sqlglot.executor": {"tf": 2}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.helper.apply_index_offset": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": 
{"tf": 1}}, "df": 8}, "a": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 5, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1}}, "df": 3}}}}, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 1}}}, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}, "u": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "i": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}}, "df": 2, "p": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "e": {"docs": {"sqlglot.executor": {"tf": 1.7320508075688772}, "sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.select": {"tf": 1}, "sqlglot.expressions.and_": {"tf": 1}, "sqlglot.expressions.or_": {"tf": 1}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": 
{"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 11}}}}}}, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}}, "df": 4}}, "c": {"docs": {}, "df": 0, "h": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}}, "df": 2}}, "t": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1}}, "df": 2}}}}}}}, "y": {"docs": {"sqlglot.diff": {"tf": 2.449489742783178}, "sqlglot.executor": {"tf": 2.23606797749979}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.delete": {"tf": 1.4142135623730951}}, "df": 4, "s": {"docs": {}, "df": 0, "q": {"docs": {}, "df": 0, "l": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.parse": {"tf": 1}, "sqlglot.parse_one": {"tf": 1}, "sqlglot.transpile": {"tf": 1.4142135623730951}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.executor.execute": {"tf": 1}, "sqlglot.expressions.Expression.sql": {"tf": 1}, "sqlglot.expressions.Select.lock": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 1}}, "df": 9}}, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "f": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 2.6457513110645907}}, "df": 1}}}}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "c": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "x": {"docs": {"sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}}, "df": 1, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "g": 
{"docs": {"sqlglot.dataframe.sql.DataFrame.orderBy": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.sort": {"tf": 1}}, "df": 2}}}}, "g": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dialects.snowflake.Snowflake.Generator.select_sql": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 1}, "sqlglot.planner.Step.from_expression": {"tf": 1}, "sqlglot.planner.Scan.from_expression": {"tf": 1}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 8}}}, "n": {"docs": {"sqlglot.diff": {"tf": 1.4142135623730951}, "sqlglot.helper.split_num_words": {"tf": 1}}, "df": 2, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}, "i": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "u": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.helper.split_num_words": {"tf": 1}}, "df": 1}}}}}, "d": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "s": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "h": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "l": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}}}}}}}, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.schema.MappingSchema": {"tf": 1}}, "df": 1}}}}}}, "j": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, 
"i": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.executor": {"tf": 2.449489742783178}, "sqlglot.expressions.Join.on": {"tf": 2.23606797749979}, "sqlglot.expressions.Join.using": {"tf": 2.23606797749979}, "sqlglot.expressions.Select.join": {"tf": 3.7416573867739413}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 1.4142135623730951}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.scope.Scope.selected_sources": {"tf": 1}, "sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}, "sqlglot.planner.Step.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.Scan.from_expression": {"tf": 1.4142135623730951}, "sqlglot.planner.SetOperation.from_expression": {"tf": 1.4142135623730951}}, "df": 17, "s": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.executor": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimize_joins.reorder_joins": {"tf": 1}, "sqlglot.optimizer.optimize_joins.normalize": {"tf": 1}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 1}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 1.4142135623730951}}, "df": 9}, "h": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.join_hints": {"tf": 1}}, "df": 1}}}}}}, "h": {"docs": {}, "df": 0, "n": 
{"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "u": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "y": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}, "b": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}, "u": {"docs": {}, "df": 0, "s": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dataframe.sql.DataFrame.fillna": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator.datatype_sql": {"tf": 1}, "sqlglot.diff": {"tf": 2.23606797749979}, "sqlglot.executor": {"tf": 2.23606797749979}}, "df": 5}}, "n": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "c": {"docs": {}, "df": 0, "k": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}, "m": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "a": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 1}}, "df": 2}}}, "e": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}, "v": {"docs": {}, "df": 0, "m": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}, "s": {"docs": {"sqlglot.lineage.LineageHTML": {"tf": 1}}, "df": 1, "o": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.expressions.Expression.dump": {"tf": 1}, "sqlglot.serde.dump": {"tf": 1}}, "df": 2}}}}, "x": {"docs": {"sqlglot": {"tf": 4.898979485566356}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor": {"tf": 4.358898943540674}, "sqlglot.expressions.Expression.replace": {"tf": 1}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Condition.not_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Predicate": {"tf": 1.7320508075688772}, "sqlglot.expressions.Join.on": {"tf": 
1.4142135623730951}, "sqlglot.expressions.Join.using": {"tf": 1.4142135623730951}, "sqlglot.expressions.Subqueryable.subquery": {"tf": 2}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.from_": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.group_by": {"tf": 2}, "sqlglot.expressions.Select.order_by": {"tf": 2}, "sqlglot.expressions.Select.sort_by": {"tf": 2}, "sqlglot.expressions.Select.cluster_by": {"tf": 2}, "sqlglot.expressions.Select.limit": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.offset": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.select": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.where": {"tf": 2.449489742783178}, "sqlglot.expressions.Select.having": {"tf": 2}, "sqlglot.expressions.Select.distinct": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.ctas": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lock": {"tf": 2.8284271247461903}, "sqlglot.expressions.Tag": {"tf": 1}, "sqlglot.expressions.maybe_parse": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.condition": {"tf": 2}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.subquery": {"tf": 2}, "sqlglot.expressions.cast": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.7320508075688772}, "sqlglot.optimizer.annotate_types.annotate_types": {"tf": 1.7320508075688772}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 2.23606797749979}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 2.23606797749979}, "sqlglot.optimizer.expand_laterals.expand_laterals": {"tf": 2.23606797749979}, 
"sqlglot.optimizer.expand_multi_table_selects.expand_multi_table_selects": {"tf": 1.4142135623730951}, "sqlglot.optimizer.merge_subqueries.merge_subqueries": {"tf": 2.8284271247461903}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.4142135623730951}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.449489742783178}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2}, "sqlglot.optimizer.pushdown_predicates.pushdown_predicates": {"tf": 2}, "sqlglot.optimizer.pushdown_projections.pushdown_projections": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope": {"tf": 2.23606797749979}, "sqlglot.optimizer.scope.Scope.subqueries": {"tf": 1}, "sqlglot.optimizer.scope.Scope.selects": {"tf": 1}, "sqlglot.optimizer.scope.traverse_scope": {"tf": 2}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.7320508075688772}, "sqlglot.optimizer.simplify.simplify_not": {"tf": 2}, "sqlglot.optimizer.unnest_subqueries.unnest_subqueries": {"tf": 2.449489742783178}, "sqlglot.planner.Step.from_expression": {"tf": 4}, "sqlglot.planner.Scan.from_expression": {"tf": 4}, "sqlglot.planner.SetOperation.from_expression": {"tf": 4}, "sqlglot.transforms.unalias_group": {"tf": 1.4142135623730951}}, "df": 60, "a": {"docs": {}, "df": 0, "v": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}, "z": {"docs": {"sqlglot": {"tf": 3}, "sqlglot.expressions.Expression.assert_is": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.lateral": {"tf": 1.4142135623730951}, "sqlglot.expressions.Select.join": {"tf": 1.4142135623730951}, "sqlglot.expressions.update": {"tf": 1.4142135623730951}, "sqlglot.expressions.and_": {"tf": 1.4142135623730951}, "sqlglot.expressions.or_": {"tf": 1.4142135623730951}, "sqlglot.expressions.expand": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_ctes.eliminate_ctes": {"tf": 1.4142135623730951}, "sqlglot.optimizer.eliminate_subqueries.eliminate_subqueries": {"tf": 
1.4142135623730951}, "sqlglot.optimizer.normalize.normalize": {"tf": 1.7320508075688772}, "sqlglot.optimizer.normalize.distributive_law": {"tf": 2.23606797749979}, "sqlglot.optimizer.optimize_joins.optimize_joins": {"tf": 2.449489742783178}, "sqlglot.optimizer.simplify.rewrite_between": {"tf": 1.4142135623730951}}, "df": 14, "e": {"docs": {}, "df": 0, "i": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "m": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "n": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}, "r": {"docs": {}, "df": 0, "o": {"docs": {"sqlglot.executor": {"tf": 1}}, "df": 1}}}}, "k": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "p": {"docs": {"sqlglot": {"tf": 1}, "sqlglot.diff": {"tf": 3.605551275463989}, "sqlglot.diff.diff": {"tf": 1.7320508075688772}, "sqlglot.executor": {"tf": 1}}, "df": 4}}, "y": {"docs": {"sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, "sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": 
{"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.executor.table.Tables": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression": {"tf": 2}, "sqlglot.expressions.Expression.this": {"tf": 1}, "sqlglot.expressions.Expression.expression": {"tf": 1}, "sqlglot.expressions.Expression.expressions": {"tf": 1}, "sqlglot.expressions.Expression.text": {"tf": 1}, "sqlglot.expressions.Expression.append": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.set": {"tf": 1.4142135623730951}, "sqlglot.expressions.Expression.walk": {"tf": 1}, "sqlglot.generator.Generator": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.join_condition": {"tf": 1.4142135623730951}, "sqlglot.optimizer.scope.walk_in_scope": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}, "sqlglot.trie.in_trie": {"tf": 2}}, "df": 34, "w": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.dialects.drill.if_sql": {"tf": 1}, "sqlglot.executor": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}, "sqlglot.trie.new_trie": {"tf": 1}, "sqlglot.trie.in_trie": {"tf": 1}}, "df": 6, "s": {"docs": {"sqlglot": {"tf": 1.4142135623730951}, "sqlglot.dialects": {"tf": 1.4142135623730951}, "sqlglot.trie.new_trie": {"tf": 2}}, "df": 3}}}}}, "s": {"docs": {"sqlglot.dataframe": {"tf": 1}, "sqlglot.dialects.bigquery.BigQuery.Generator": {"tf": 1}, "sqlglot.dialects.clickhouse.ClickHouse.Generator": {"tf": 1}, "sqlglot.dialects.databricks.Databricks.Generator": {"tf": 1}, "sqlglot.dialects.drill.Drill.Generator": {"tf": 1}, "sqlglot.dialects.duckdb.DuckDB.Generator": {"tf": 1}, "sqlglot.dialects.hive.Hive.Generator": {"tf": 1}, "sqlglot.dialects.mysql.MySQL.Generator": {"tf": 1}, "sqlglot.dialects.oracle.Oracle.Generator": {"tf": 1}, "sqlglot.dialects.postgres.Postgres.Generator": {"tf": 1}, "sqlglot.dialects.presto.Presto.Generator": {"tf": 1}, 
"sqlglot.dialects.redshift.Redshift.Generator": {"tf": 1}, "sqlglot.dialects.snowflake.Snowflake.Generator": {"tf": 1}, "sqlglot.dialects.spark.Spark.Generator": {"tf": 1}, "sqlglot.dialects.sqlite.SQLite.Generator": {"tf": 1}, "sqlglot.dialects.starrocks.StarRocks.Generator": {"tf": 1}, "sqlglot.dialects.tableau.Tableau.Generator": {"tf": 1}, "sqlglot.dialects.teradata.Teradata.Generator": {"tf": 1}, "sqlglot.dialects.trino.Trino.Generator": {"tf": 1}, "sqlglot.dialects.tsql.TSQL.Generator": {"tf": 1}, "sqlglot.diff": {"tf": 1}, "sqlglot.executor.python.Python.Generator": {"tf": 1}, "sqlglot.expressions.Expression": {"tf": 1.7320508075688772}, "sqlglot.generator.Generator": {"tf": 1}}, "df": 24}, "e": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "r": {"docs": {"sqlglot.executor.table.Tables": {"tf": 1}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1}}, "df": 2}}}}, "d": {"docs": {"sqlglot.trie.new_trie": {"tf": 1}}, "df": 1}}}, "r": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "l": {"docs": {}, "df": 0, "s": {"docs": {"sqlglot": {"tf": 1}}, "df": 1}}}}}, "p": {"docs": {}, "df": 0, "t": {"docs": {"sqlglot.optimizer.scope.Scope.replace": {"tf": 1}}, "df": 1}}}, "a": {"docs": {}, "df": 0, "t": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.dataframe": {"tf": 1.7320508075688772}}, "df": 1}}}, "i": {"docs": {}, "df": 0, "n": {"docs": {}, "df": 0, "d": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1, "s": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}, "n": {"docs": {}, "df": 0, "o": {"docs": {}, "df": 0, "w": {"docs": {"sqlglot.diff": {"tf": 1}, "sqlglot.optimizer.eliminate_joins.eliminate_joins": {"tf": 1}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1}}, "df": 3, "l": {"docs": {}, "df": 0, "e": {"docs": {}, "df": 0, "d": {"docs": {}, "df": 0, "g": {"docs": {}, "df": 0, "e": {"docs": {"sqlglot.diff": {"tf": 1}}, "df": 1}}}}}}}}, "t": {"docs": {"sqlglot.executor.table.Tables": 
{"tf": 1.4142135623730951}, "sqlglot.schema.AbstractMappingSchema": {"tf": 1.4142135623730951}}, "df": 2}, "w": {"docs": {}, "df": 0, "a": {"docs": {}, "df": 0, "r": {"docs": {}, "df": 0, "g": {"docs": {"sqlglot.expressions.Kwarg": {"tf": 1.4142135623730951}}, "df": 1, "s": {"docs": {"sqlglot.expressions.Condition.and_": {"tf": 1}, "sqlglot.expressions.Condition.or_": {"tf": 1}, "sqlglot.expressions.Unionable.union": {"tf": 1}, "sqlglot.expressions.Unionable.intersect": {"tf": 1}, "sqlglot.expressions.Unionable.except_": {"tf": 1}, "sqlglot.expressions.Join.on": {"tf": 1}, "sqlglot.expressions.Join.using": {"tf": 1}, "sqlglot.expressions.Subqueryable.with_": {"tf": 1}, "sqlglot.expressions.Union.limit": {"tf": 1}, "sqlglot.expressions.Select.from_": {"tf": 1}, "sqlglot.expressions.Select.group_by": {"tf": 1}, "sqlglot.expressions.Select.order_by": {"tf": 1}, "sqlglot.expressions.Select.sort_by": {"tf": 1}, "sqlglot.expressions.Select.cluster_by": {"tf": 1}, "sqlglot.expressions.Select.limit": {"tf": 1}, "sqlglot.expressions.Select.offset": {"tf": 1}, "sqlglot.expressions.Select.select": {"tf": 1}, "sqlglot.expressions.Select.lateral": {"tf": 1}, "sqlglot.expressions.Select.join": {"tf": 1}, "sqlglot.expressions.Select.where": {"tf": 1}, "sqlglot.expressions.Select.having": {"tf": 1}, "sqlglot.expressions.Select.ctas": {"tf": 1}, "sqlglot.expressions.union": {"tf": 1}, "sqlglot.expressions.intersect": {"tf": 1}, "sqlglot.expressions.except_": {"tf": 1}, "sqlglot.expressions.replace_placeholders": {"tf": 1}, "sqlglot.expressions.func": {"tf": 1.7320508075688772}, "sqlglot.optimizer.optimizer.optimize": {"tf": 1.4142135623730951}, "sqlglot.parser.Parser.expression": {"tf": 1}}, "df": 29}}}}}}}}}, "pipeline": ["trimmer"], "_isPrebuiltIndex": true}; + + // mirrored in build-search-index.js (part 1) + // Also split on html tags. this is a cheap heuristic, but good enough. 
+ elasticlunr.tokenizer.setSeperator(/[\s\-.;&_'"=,()]+|<[^>]*>/); + + let searchIndex; + if (docs._isPrebuiltIndex) { + console.info("using precompiled search index"); + searchIndex = elasticlunr.Index.load(docs); + } else { + console.time("building search index"); + // mirrored in build-search-index.js (part 2) + searchIndex = elasticlunr(function () { + this.pipeline.remove(elasticlunr.stemmer); + this.pipeline.remove(elasticlunr.stopWordFilter); + this.addField("qualname"); + this.addField("fullname"); + this.addField("annotation"); + this.addField("default_value"); + this.addField("signature"); + this.addField("bases"); + this.addField("doc"); + this.setRef("fullname"); + }); + for (let doc of docs) { + searchIndex.addDoc(doc); + } + console.timeEnd("building search index"); + } + + return (term) => searchIndex.search(term, { + fields: { + qualname: {boost: 4}, + fullname: {boost: 2}, + annotation: {boost: 2}, + default_value: {boost: 2}, + signature: {boost: 2}, + bases: {boost: 2}, + doc: {boost: 1}, + }, + expand: true + }); +})(); \ No newline at end of file diff --git a/docs/sqlglot.html b/docs/sqlglot.html new file mode 100644 index 0000000..e7a1724 --- /dev/null +++ b/docs/sqlglot.html @@ -0,0 +1,1226 @@ + + + + + + + sqlglot API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

SQLGlot

+ +

SQLGlot is a no-dependency SQL parser, transpiler, optimizer, and engine. It can be used to format SQL or translate between 19 different dialects like DuckDB, Presto, Spark, Snowflake, and BigQuery. It aims to read a wide variety of SQL inputs and output syntactically correct SQL in the targeted dialects.

+ +

It is a very comprehensive generic SQL parser with a robust test suite. It is also quite performant, while being written purely in Python.

+ +

You can easily customize the parser, analyze queries, traverse expression trees, and programmatically build SQL.

+ +

Syntax errors are highlighted and dialect incompatibilities can warn or raise depending on configurations. However, it should be noted that SQL validation is not SQLGlot’s goal, so some syntax errors may go unnoticed.

+ +

Contributions are very welcome in SQLGlot; read the contribution guide to get started!

+ +

Table of Contents

+ + + +

Install

+ +

From PyPI:

+ +
pip3 install sqlglot
+
+ +

Or with a local checkout:

+ +
make install
+
+ +

Requirements for development (optional):

+ +
make install-dev
+
+ +

Get in Touch

+ +

We'd love to hear from you. Join our community Slack channel!

+ +

Examples

+ +

Formatting and Transpiling

+ +

Easily translate from one dialect to another. For example, date/time functions vary from dialects and can be hard to deal with:

+ +
+
import sqlglot
+sqlglot.transpile("SELECT EPOCH_MS(1618088028295)", read="duckdb", write="hive")[0]
+
+
+ +
+
'SELECT FROM_UNIXTIME(1618088028295 / 1000)'
+
+
+ +

SQLGlot can even translate custom time formats:

+ +
+
import sqlglot
+sqlglot.transpile("SELECT STRFTIME(x, '%y-%-m-%S')", read="duckdb", write="hive")[0]
+
+
+ +
+
"SELECT DATE_FORMAT(x, 'yy-M-ss')"
+
+
+ +

As another example, let's suppose that we want to read in a SQL query that contains a CTE and a cast to REAL, and then transpile it to Spark, which uses backticks for identifiers and FLOAT instead of REAL:

+ +
+
import sqlglot
+
+sql = """WITH baz AS (SELECT a, c FROM foo WHERE a = 1) SELECT f.a, b.b, baz.c, CAST("b"."a" AS REAL) d FROM foo f JOIN bar b ON f.a = b.a LEFT JOIN baz ON f.a = baz.a"""
+print(sqlglot.transpile(sql, write="spark", identify=True, pretty=True)[0])
+
+
+ +
+
WITH `baz` AS (
+  SELECT
+    `a`,
+    `c`
+  FROM `foo`
+  WHERE
+    `a` = 1
+)
+SELECT
+  `f`.`a`,
+  `b`.`b`,
+  `baz`.`c`,
+  CAST(`b`.`a` AS FLOAT) AS `d`
+FROM `foo` AS `f`
+JOIN `bar` AS `b`
+  ON `f`.`a` = `b`.`a`
+LEFT JOIN `baz`
+  ON `f`.`a` = `baz`.`a`
+
+
+ +

Comments are also preserved in a best-effort basis when transpiling SQL code:

+ +
+
sql = """
+/* multi
+   line
+   comment
+*/
+SELECT
+  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,
+  CAST(x AS INT), # comment 3
+  y               -- comment 4
+FROM
+  bar /* comment 5 */,
+  tbl #          comment 6
+"""
+
+print(sqlglot.transpile(sql, read='mysql', pretty=True)[0])
+
+
+ +
+
/* multi
+   line
+   comment
+*/
+SELECT
+  tbl.cola /* comment 1 */ + tbl.colb /* comment 2 */,
+  CAST(x AS INT), /* comment 3 */
+  y /* comment 4 */
+FROM bar /* comment 5 */, tbl /*          comment 6 */
+
+
+ +

Metadata

+ +

You can explore SQL with expression helpers to do things like find columns and tables:

+ +
+
from sqlglot import parse_one, exp
+
+# print all column references (a and b)
+for column in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Column):
+    print(column.alias_or_name)
+
+# find all projections in select statements (a and c)
+for select in parse_one("SELECT a, b + 1 AS c FROM d").find_all(exp.Select):
+    for projection in select.expressions:
+        print(projection.alias_or_name)
+
+# find all tables (x, y, z)
+for table in parse_one("SELECT * FROM x JOIN y JOIN z").find_all(exp.Table):
+    print(table.name)
+
+
+ +

Parser Errors

+ +

When the parser detects an error in the syntax, it raises a ParserError:

+ +
+
import sqlglot
+sqlglot.transpile("SELECT foo( FROM bar")
+
+
+ +
sqlglot.errors.ParseError: Expecting ). Line 1, Col: 13.
+  select foo( FROM bar
+              ~~~~
+
+ +

Structured syntax errors are accessible for programmatic use:

+ +
+
import sqlglot
+try:
+    sqlglot.transpile("SELECT foo( FROM bar")
+except sqlglot.errors.ParseError as e:
+    print(e.errors)
+
+
+ +
+
[{
+  'description': 'Expecting )',
+  'line': 1,
+  'col': 13,
+  'start_context': 'SELECT foo( ',
+  'highlight': 'FROM',
+  'end_context': ' bar'
+}]
+
+
+ +

Unsupported Errors

+ +

Presto APPROX_DISTINCT supports the accuracy argument which is not supported in Hive:

+ +
+
import sqlglot
+sqlglot.transpile("SELECT APPROX_DISTINCT(a, 0.1) FROM foo", read="presto", write="hive")
+
+
+ +
+
APPROX_COUNT_DISTINCT does not support accuracy
+'SELECT APPROX_COUNT_DISTINCT(a) FROM foo'
+
+
+ +

Build and Modify SQL

+ +

SQLGlot supports incrementally building sql expressions:

+ +
+
from sqlglot import select, condition
+
+where = condition("x=1").and_("y=1")
+select("*").from_("y").where(where).sql()
+
+
+ +
+
'SELECT * FROM y WHERE x = 1 AND y = 1'
+
+
+ +

You can also modify a parsed tree:

+ +
+
from sqlglot import parse_one
+parse_one("SELECT x FROM y").from_("z").sql()
+
+
+ +
+
'SELECT x FROM y, z'
+
+
+ +

There is also a way to recursively transform the parsed tree by applying a mapping function to each tree node:

+ +
+
from sqlglot import exp, parse_one
+
+expression_tree = parse_one("SELECT a FROM x")
+
+def transformer(node):
+    if isinstance(node, exp.Column) and node.name == "a":
+        return parse_one("FUN(a)")
+    return node
+
+transformed_tree = expression_tree.transform(transformer)
+transformed_tree.sql()
+
+
+ +
+
'SELECT FUN(a) FROM x'
+
+
+ +

SQL Optimizer

+ +

SQLGlot can rewrite queries into an "optimized" form. It performs a variety of techniques to create a new canonical AST. This AST can be used to standardize queries or provide the foundations for implementing an actual engine. For example:

+ +
+
import sqlglot
+from sqlglot.optimizer import optimize
+
+print(
+    optimize(
+        sqlglot.parse_one("""
+            SELECT A OR (B OR (C AND D))
+            FROM x
+            WHERE Z = date '2021-01-01' + INTERVAL '1' month OR 1 = 0
+        """),
+        schema={"x": {"A": "INT", "B": "INT", "C": "INT", "D": "INT", "Z": "STRING"}}
+    ).sql(pretty=True)
+)
+
+
+ +
+
SELECT
+  (
+    "x"."a" OR "x"."b" OR "x"."c"
+  ) AND (
+    "x"."a" OR "x"."b" OR "x"."d"
+  ) AS "_col_0"
+FROM "x" AS "x"
+WHERE
+  CAST("x"."z" AS DATE) = CAST('2021-02-01' AS DATE)
+
+
+ +

AST Introspection

+ +

You can see the AST version of the sql by calling repr:

+ +
+
from sqlglot import parse_one
+print(repr(parse_one("SELECT a + 1 AS z")))
+
+
+ +
+
(SELECT expressions:
+  (ALIAS this:
+    (ADD this:
+      (COLUMN this:
+        (IDENTIFIER this: a, quoted: False)), expression:
+      (LITERAL this: 1, is_string: False)), alias:
+    (IDENTIFIER this: z, quoted: False)))
+
+
+ +

AST Diff

+ +

SQLGlot can calculate the difference between two expressions and output changes in a form of a sequence of actions needed to transform a source expression into a target one:

+ +
+
from sqlglot import diff, parse_one
+diff(parse_one("SELECT a + b, c, d"), parse_one("SELECT c, a - b, d"))
+
+
+ +
+
[
+  Remove(expression=(ADD this:
+    (COLUMN this:
+      (IDENTIFIER this: a, quoted: False)), expression:
+    (COLUMN this:
+      (IDENTIFIER this: b, quoted: False)))),
+  Insert(expression=(SUB this:
+    (COLUMN this:
+      (IDENTIFIER this: a, quoted: False)), expression:
+    (COLUMN this:
+      (IDENTIFIER this: b, quoted: False)))),
+  Move(expression=(COLUMN this:
+    (IDENTIFIER this: c, quoted: False))),
+  Keep(source=(IDENTIFIER this: b, quoted: False), target=(IDENTIFIER this: b, quoted: False)),
+  ...
+]
+
+
+ +

See also: Semantic Diff for SQL.

+ +

Custom Dialects

+ +

Dialects can be added by subclassing Dialect:

+ +
+
from sqlglot import exp
+from sqlglot.dialects.dialect import Dialect
+from sqlglot.generator import Generator
+from sqlglot.tokens import Tokenizer, TokenType
+
+
+class Custom(Dialect):
+    class Tokenizer(Tokenizer):
+        QUOTES = ["'", '"']
+        IDENTIFIERS = ["`"]
+
+        KEYWORDS = {
+            **Tokenizer.KEYWORDS,
+            "INT64": TokenType.BIGINT,
+            "FLOAT64": TokenType.DOUBLE,
+        }
+
+    class Generator(Generator):
+        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}
+
+        TYPE_MAPPING = {
+            exp.DataType.Type.TINYINT: "INT64",
+            exp.DataType.Type.SMALLINT: "INT64",
+            exp.DataType.Type.INT: "INT64",
+            exp.DataType.Type.BIGINT: "INT64",
+            exp.DataType.Type.DECIMAL: "NUMERIC",
+            exp.DataType.Type.FLOAT: "FLOAT64",
+            exp.DataType.Type.DOUBLE: "FLOAT64",
+            exp.DataType.Type.BOOLEAN: "BOOL",
+            exp.DataType.Type.TEXT: "STRING",
+        }
+
+print(Dialect["custom"])
+
+
+ +
<class '__main__.Custom'>
+
+ +

SQL Execution

+ +

One can even interpret SQL queries using SQLGlot, where the tables are represented as Python dictionaries. Although the engine is not very fast (it's not supposed to be) and is in a relatively early stage of development, it can be useful for unit testing and running SQL natively across Python objects. Additionally, the foundation can be easily integrated with fast compute kernels (arrow, pandas). Below is an example showcasing the execution of a SELECT expression that involves aggregations and JOINs:

+ +
+
from sqlglot.executor import execute
+
+tables = {
+    "sushi": [
+        {"id": 1, "price": 1.0},
+        {"id": 2, "price": 2.0},
+        {"id": 3, "price": 3.0},
+    ],
+    "order_items": [
+        {"sushi_id": 1, "order_id": 1},
+        {"sushi_id": 1, "order_id": 1},
+        {"sushi_id": 2, "order_id": 1},
+        {"sushi_id": 3, "order_id": 2},
+    ],
+    "orders": [
+        {"id": 1, "user_id": 1},
+        {"id": 2, "user_id": 2},
+    ],
+}
+
+execute(
+    """
+    SELECT
+      o.user_id,
+      SUM(s.price) AS price
+    FROM orders o
+    JOIN order_items i
+      ON o.id = i.order_id
+    JOIN sushi s
+      ON i.sushi_id = s.id
+    GROUP BY o.user_id
+    """,
+    tables=tables
+)
+
+
+ +
+
user_id price
+      1   4.0
+      2   3.0
+
+
+ +

See also: Writing a Python SQL engine from scratch.

+ +

Used By

+ + + +

Documentation

+ +

SQLGlot uses pdoc to serve its API documentation:

+ +
make docs-serve
+
+ +

Run Tests and Lint

+ +
make check  # Set SKIP_INTEGRATION=1 to skip integration tests
+
+ +

Benchmarks

+ +

Benchmarks run on Python 3.10.5 in seconds.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Querysqlglotsqlfluffsqltreesqlparsemoz_sql_parsersqloxide
tpch0.01308 (1.0)1.60626 (122.7)0.01168 (0.893)0.04958 (3.791)0.08543 (6.531)0.00136 (0.104)
short0.00109 (1.0)0.14134 (129.2)0.00099 (0.906)0.00342 (3.131)0.00652 (5.970)8.76E-5 (0.080)
long0.01399 (1.0)2.12632 (151.9)0.01126 (0.805)0.04410 (3.151)0.06671 (4.767)0.00107 (0.076)
crazy0.03969 (1.0)24.3777 (614.1)0.03917 (0.987)11.7043 (294.8)1.03280 (26.02)0.00625 (0.157)
+ +

Optional Dependencies

+ +

SQLGlot uses dateutil to simplify literal timedelta expressions. The optimizer will not simplify expressions like the following if the module cannot be found:

+ +
+
x + interval '1' month
+
+
+ +
+
+ + + + + +
  1"""
+  2.. include:: ../README.md
+  3
+  4----
+  5"""
+  6
+  7from __future__ import annotations
+  8
+  9import typing as t
+ 10
+ 11from sqlglot import expressions as exp
+ 12from sqlglot.dialects import Dialect, Dialects
+ 13from sqlglot.diff import diff
+ 14from sqlglot.errors import ErrorLevel, ParseError, TokenError, UnsupportedError
+ 15from sqlglot.expressions import Expression
+ 16from sqlglot.expressions import alias_ as alias
+ 17from sqlglot.expressions import (
+ 18    and_,
+ 19    column,
+ 20    condition,
+ 21    except_,
+ 22    from_,
+ 23    intersect,
+ 24    maybe_parse,
+ 25    not_,
+ 26    or_,
+ 27    select,
+ 28    subquery,
+ 29)
+ 30from sqlglot.expressions import table_ as table
+ 31from sqlglot.expressions import to_column, to_table, union
+ 32from sqlglot.generator import Generator
+ 33from sqlglot.parser import Parser
+ 34from sqlglot.schema import MappingSchema, Schema
+ 35from sqlglot.tokens import Tokenizer, TokenType
+ 36
+ 37if t.TYPE_CHECKING:
+ 38    from sqlglot.dialects.dialect import DialectType
+ 39
+ 40    T = t.TypeVar("T", bound=Expression)
+ 41
+ 42
+ 43__version__ = "11.0.0"
+ 44
+ 45pretty = False
+ 46"""Whether to format generated SQL by default."""
+ 47
+ 48schema = MappingSchema()
+ 49"""The default schema used by SQLGlot (e.g. in the optimizer)."""
+ 50
+ 51
+ 52def parse(sql: str, read: DialectType = None, **opts) -> t.List[t.Optional[Expression]]:
+ 53    """
+ 54    Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.
+ 55
+ 56    Args:
+ 57        sql: the SQL code string to parse.
+ 58        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+ 59        **opts: other `sqlglot.parser.Parser` options.
+ 60
+ 61    Returns:
+ 62        The resulting syntax tree collection.
+ 63    """
+ 64    dialect = Dialect.get_or_raise(read)()
+ 65    return dialect.parse(sql, **opts)
+ 66
+ 67
+ 68@t.overload
+ 69def parse_one(
+ 70    sql: str,
+ 71    read: None = None,
+ 72    into: t.Type[T] = ...,
+ 73    **opts,
+ 74) -> T:
+ 75    ...
+ 76
+ 77
+ 78@t.overload
+ 79def parse_one(
+ 80    sql: str,
+ 81    read: DialectType,
+ 82    into: t.Type[T],
+ 83    **opts,
+ 84) -> T:
+ 85    ...
+ 86
+ 87
+ 88@t.overload
+ 89def parse_one(
+ 90    sql: str,
+ 91    read: None = None,
+ 92    into: t.Union[str, t.Collection[t.Union[str, t.Type[Expression]]]] = ...,
+ 93    **opts,
+ 94) -> Expression:
+ 95    ...
+ 96
+ 97
+ 98@t.overload
+ 99def parse_one(
+100    sql: str,
+101    read: DialectType,
+102    into: t.Union[str, t.Collection[t.Union[str, t.Type[Expression]]]],
+103    **opts,
+104) -> Expression:
+105    ...
+106
+107
+108@t.overload
+109def parse_one(
+110    sql: str,
+111    **opts,
+112) -> Expression:
+113    ...
+114
+115
+116def parse_one(
+117    sql: str,
+118    read: DialectType = None,
+119    into: t.Optional[exp.IntoType] = None,
+120    **opts,
+121) -> Expression:
+122    """
+123    Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.
+124
+125    Args:
+126        sql: the SQL code string to parse.
+127        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+128        into: the SQLGlot Expression to parse into.
+129        **opts: other `sqlglot.parser.Parser` options.
+130
+131    Returns:
+132        The syntax tree for the first parsed statement.
+133    """
+134
+135    dialect = Dialect.get_or_raise(read)()
+136
+137    if into:
+138        result = dialect.parse_into(into, sql, **opts)
+139    else:
+140        result = dialect.parse(sql, **opts)
+141
+142    for expression in result:
+143        if not expression:
+144            raise ParseError(f"No expression was parsed from '{sql}'")
+145        return expression
+146    else:
+147        raise ParseError(f"No expression was parsed from '{sql}'")
+148
+149
+150def transpile(
+151    sql: str,
+152    read: DialectType = None,
+153    write: DialectType = None,
+154    identity: bool = True,
+155    error_level: t.Optional[ErrorLevel] = None,
+156    **opts,
+157) -> t.List[str]:
+158    """
+159    Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed
+160    to conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.
+161
+162    Args:
+163        sql: the SQL code string to transpile.
+164        read: the source dialect used to parse the input string (eg. "spark", "hive", "presto", "mysql").
+165        write: the target dialect into which the input should be transformed (eg. "spark", "hive", "presto", "mysql").
+166        identity: if set to `True` and if the target dialect is not specified the source dialect will be used as both:
+167            the source and the target dialect.
+168        error_level: the desired error level of the parser.
+169        **opts: other `sqlglot.generator.Generator` options.
+170
+171    Returns:
+172        The list of transpiled SQL statements.
+173    """
+174    write = write or read if identity else write
+175    return [
+176        Dialect.get_or_raise(write)().generate(expression, **opts)
+177        for expression in parse(sql, read, error_level=error_level)
+178    ]
+
+ + +
+
+
+ pretty = False + + +
+ + +

Whether to format generated SQL by default.

+
+ + +
+
+
+ schema = <sqlglot.schema.MappingSchema object> + + +
+ + +

The default schema used by SQLGlot (e.g. in the optimizer).

+
+ + +
+
+ +
+ + def + parse( sql: str, read: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, **opts) -> List[Optional[sqlglot.expressions.Expression]]: + + + +
+ +
53def parse(sql: str, read: DialectType = None, **opts) -> t.List[t.Optional[Expression]]:
+54    """
+55    Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.
+56
+57    Args:
+58        sql: the SQL code string to parse.
+59        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+60        **opts: other `sqlglot.parser.Parser` options.
+61
+62    Returns:
+63        The resulting syntax tree collection.
+64    """
+65    dialect = Dialect.get_or_raise(read)()
+66    return dialect.parse(sql, **opts)
+
+ + +

Parses the given SQL string into a collection of syntax trees, one per parsed SQL statement.

+ +
Arguments:
+ +
    +
  • sql: the SQL code string to parse.
  • +
  • read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
  • +
  • **opts: other sqlglot.parser.Parser options.
  • +
+ +
Returns:
+ +
+

The resulting syntax tree collection.

+
+
+ + +
+
+ +
+ + def + parse_one( sql: str, read: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, into: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None, **opts) -> sqlglot.expressions.Expression: + + + +
+ +
117def parse_one(
+118    sql: str,
+119    read: DialectType = None,
+120    into: t.Optional[exp.IntoType] = None,
+121    **opts,
+122) -> Expression:
+123    """
+124    Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.
+125
+126    Args:
+127        sql: the SQL code string to parse.
+128        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+129        into: the SQLGlot Expression to parse into.
+130        **opts: other `sqlglot.parser.Parser` options.
+131
+132    Returns:
+133        The syntax tree for the first parsed statement.
+134    """
+135
+136    dialect = Dialect.get_or_raise(read)()
+137
+138    if into:
+139        result = dialect.parse_into(into, sql, **opts)
+140    else:
+141        result = dialect.parse(sql, **opts)
+142
+143    for expression in result:
+144        if not expression:
+145            raise ParseError(f"No expression was parsed from '{sql}'")
+146        return expression
+147    else:
+148        raise ParseError(f"No expression was parsed from '{sql}'")
+
+ + +

Parses the given SQL string and returns a syntax tree for the first parsed SQL statement.

+ +
Arguments:
+ +
    +
  • sql: the SQL code string to parse.
  • +
  • read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
  • +
  • into: the SQLGlot Expression to parse into.
  • +
  • **opts: other sqlglot.parser.Parser options.
  • +
+ +
Returns:
+ +
+

The syntax tree for the first parsed statement.

+
+
+ + +
+
+ +
+ + def + transpile( sql: str, read: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, write: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, identity: bool = True, error_level: Optional[sqlglot.errors.ErrorLevel] = None, **opts) -> List[str]: + + + +
+ +
151def transpile(
+152    sql: str,
+153    read: DialectType = None,
+154    write: DialectType = None,
+155    identity: bool = True,
+156    error_level: t.Optional[ErrorLevel] = None,
+157    **opts,
+158) -> t.List[str]:
+159    """
+160    Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed
+161    to conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.
+162
+163    Args:
+164        sql: the SQL code string to transpile.
+165        read: the source dialect used to parse the input string (eg. "spark", "hive", "presto", "mysql").
+166        write: the target dialect into which the input should be transformed (eg. "spark", "hive", "presto", "mysql").
+167        identity: if set to `True` and if the target dialect is not specified the source dialect will be used as both:
+168            the source and the target dialect.
+169        error_level: the desired error level of the parser.
+170        **opts: other `sqlglot.generator.Generator` options.
+171
+172    Returns:
+173        The list of transpiled SQL statements.
+174    """
+175    write = write or read if identity else write
+176    return [
+177        Dialect.get_or_raise(write)().generate(expression, **opts)
+178        for expression in parse(sql, read, error_level=error_level)
+179    ]
+
+ + +

Parses the given SQL string in accordance with the source dialect and returns a list of SQL strings transformed +to conform to the target dialect. Each string in the returned list represents a single transformed SQL statement.

+ +
Arguments:
+ +
    +
  • sql: the SQL code string to transpile.
  • +
  • read: the source dialect used to parse the input string (eg. "spark", "hive", "presto", "mysql").
  • +
  • write: the target dialect into which the input should be transformed (eg. "spark", "hive", "presto", "mysql").
  • +
  • identity: if set to True and if the target dialect is not specified the source dialect will be used as both: +the source and the target dialect.
  • +
  • error_level: the desired error level of the parser.
  • +
  • **opts: other sqlglot.generator.Generator options.
  • +
+ +
Returns:
+ +
+

The list of transpiled SQL statements.

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dataframe.html b/docs/sqlglot/dataframe.html new file mode 100644 index 0000000..ea6f345 --- /dev/null +++ b/docs/sqlglot/dataframe.html @@ -0,0 +1,506 @@ + + + + + + + sqlglot.dataframe API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

PySpark DataFrame SQL Generator

+ +

This is a drop-in replacement for the PySpark DataFrame API that will generate SQL instead of executing DataFrame operations directly. This, when combined with the transpiling support in SQLGlot, allows one to write PySpark DataFrame code and execute it on other engines like DuckDB, Presto, Spark, Snowflake, and BigQuery.

+ +

Currently many of the common operations are covered and more functionality will be added over time. Please open an issue or PR with your feedback or contribution to help influence what should be prioritized next and make sure your use case is properly supported.

+ +

How to use

+ +

Instructions

+ +
    +
  • Install SQLGlot and that is all that is required to just generate SQL. The examples show generating SQL and then executing that SQL on a specific engine and that will require that engine's client library.
  • +
  • Find/replace all from pyspark.sql with from sqlglot.dataframe.
  • +
  • Prior to any spark.read.table or spark.table run sqlglot.schema.add_table('<table_name>', <column_structure>). +
      +
    • The column structure can be defined the following ways: +
        +
      • Dictionary where the keys are column names and values are string of the Spark SQL type name. +
          +
        • Ex: {'cola': 'string', 'colb': 'int'}
        • +
      • +
      • PySpark DataFrame StructType similar to when using createDataFrame. +
          +
        • Ex: StructType([StructField('cola', StringType()), StructField('colb', IntegerType())])
        • +
      • +
      • A string of names and types similar to what is supported in createDataFrame. +
          +
        • Ex: cola: STRING, colb: INT
        • +
      • +
      • [Not Recommended] A list of string column names without type. +
          +
        • Ex: ['cola', 'colb']
        • +
        • The lack of types may limit functionality in future releases.
        • +
      • +
    • +
    • See Registering Custom Schema for information on how to skip this step if the information is stored externally.
    • +
  • +
  • Add .sql(pretty=True) to your final DataFrame command to return a list of sql statements to run that command. +
      +
    • In most cases a single SQL statement is returned. Currently the only exception is when caching DataFrames which isn't supported in other dialects.
    • +
    • Spark is the default output dialect. See dialects for a full list of dialects.
    • +
    • Ex: .sql(pretty=True, dialect='bigquery')
    • +
  • +
+ +

Examples

+ +
+
import sqlglot
+from sqlglot.dataframe.sql.session import SparkSession
+from sqlglot.dataframe.sql import functions as F
+
+sqlglot.schema.add_table('employee', {
+  'employee_id': 'INT',
+  'fname': 'STRING',
+  'lname': 'STRING',
+  'age': 'INT',
+})  # Register the table structure prior to reading from the table
+
+spark = SparkSession()
+
+df = (
+    spark
+    .table('employee')
+    .groupBy(F.col("age"))
+    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) 
+)
+
+print(df.sql(pretty=True))  # Spark will be the dialect used by default
+
+
+ +
SELECT
+  `employee`.`age` AS `age`,
+  COUNT(DISTINCT `employee`.`employee_id`) AS `num_employees`
+FROM `employee` AS `employee`
+GROUP BY
+  `employee`.`age`
+
+ +

Registering Custom Schema Class

+ +

The step of adding sqlglot.schema.add_table can be skipped if you have the column structure stored externally like in a file or from an external metadata table. This can be done by writing a class that implements the sqlglot.schema.Schema abstract class and then assigning that class to sqlglot.schema.

+ +
+
import sqlglot
+from sqlglot.dataframe.sql.session import SparkSession
+from sqlglot.dataframe.sql import functions as F
+from sqlglot.schema import Schema
+
+
+class ExternalSchema(Schema):
+  ...
+
+sqlglot.schema = ExternalSchema()
+
+spark = SparkSession()
+
+df = (
+    spark
+    .table('employee')
+    .groupBy(F.col("age"))
+    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees")) 
+)
+
+print(df.sql(pretty=True))
+
+
+ +

Example Implementations

+ +

Bigquery

+ +
+
from google.cloud import bigquery
+from sqlglot.dataframe.sql.session import SparkSession
+from sqlglot.dataframe.sql import types
+from sqlglot.dataframe.sql import functions as F
+
+client = bigquery.Client()
+
+data = [
+    (1, "Jack", "Shephard", 34),
+    (2, "John", "Locke", 48),
+    (3, "Kate", "Austen", 34),
+    (4, "Claire", "Littleton", 22),
+    (5, "Hugo", "Reyes", 26),
+]
+schema = types.StructType([
+    types.StructField('employee_id', types.IntegerType(), False),
+    types.StructField('fname', types.StringType(), False),
+    types.StructField('lname', types.StringType(), False),
+    types.StructField('age', types.IntegerType(), False),
+])
+
+sql_statements = (
+    SparkSession()
+    .createDataFrame(data, schema)
+    .groupBy(F.col("age"))
+    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))
+    .sql(dialect="bigquery")
+)
+
+result = None
+for sql in sql_statements:
+  result = client.query(sql)
+
+assert result is not None
+for row in client.query(result):
+    print(f"Age: {row['age']}, Num Employees: {row['num_employees']}")
+
+
+ +

Snowflake

+ +
+
import os
+
+import snowflake.connector
+from sqlglot.dataframe.session import SparkSession
+from sqlglot.dataframe import types
+from sqlglot.dataframe import functions as F
+
+ctx = snowflake.connector.connect(
+    user=os.environ["SNOWFLAKE_USER"],
+    password=os.environ["SNOWFLAKE_PASS"],
+    account=os.environ["SNOWFLAKE_ACCOUNT"]
+)
+cs = ctx.cursor()
+
+data = [
+    (1, "Jack", "Shephard", 34),
+    (2, "John", "Locke", 48),
+    (3, "Kate", "Austen", 34),
+    (4, "Claire", "Littleton", 22),
+    (5, "Hugo", "Reyes", 26),
+]
+schema = types.StructType([
+    types.StructField('employee_id', types.IntegerType(), False),
+    types.StructField('fname', types.StringType(), False),
+    types.StructField('lname', types.StringType(), False),
+    types.StructField('age', types.IntegerType(), False),
+])
+
+sql_statements = (
+    SparkSession()
+    .createDataFrame(data, schema)
+    .groupBy(F.col("age"))
+    .agg(F.countDistinct(F.col("lname")).alias("num_employees"))
+    .sql(dialect="snowflake")
+)
+
+try:
+    for sql in sql_statements:
+        cs.execute(sql)
+    results = cs.fetchall()
+    for row in results:
+        print(f"Age: {row[0]}, Num Employees: {row[1]}")
+finally:
+    cs.close()
+ctx.close()
+
+
+ +

Spark

+ +
+
from pyspark.sql.session import SparkSession as PySparkSession
+from sqlglot.dataframe.sql.session import SparkSession
+from sqlglot.dataframe.sql import types
+from sqlglot.dataframe.sql import functions as F
+
+data = [
+    (1, "Jack", "Shephard", 34),
+    (2, "John", "Locke", 48),
+    (3, "Kate", "Austen", 34),
+    (4, "Claire", "Littleton", 22),
+    (5, "Hugo", "Reyes", 26),
+]
+schema = types.StructType([
+    types.StructField('employee_id', types.IntegerType(), False),
+    types.StructField('fname', types.StringType(), False),
+    types.StructField('lname', types.StringType(), False),
+    types.StructField('age', types.IntegerType(), False),
+])
+
+sql_statements = (
+    SparkSession()
+    .createDataFrame(data, schema)
+    .groupBy(F.col("age"))
+    .agg(F.countDistinct(F.col("employee_id")).alias("num_employees"))
+    .sql(dialect="spark")
+)
+
+pyspark = PySparkSession.builder.master("local[*]").getOrCreate()
+
+df = None
+for sql in sql_statements:
+    df = pyspark.sql(sql)
+
+assert df is not None
+df.show()
+
+
+ +

Unsupportable Operations

+ +

Any operation that lacks a way to represent it in SQL cannot be supported by this tool. An example of this would be rdd operations. Since the DataFrame API though is mostly modeled around SQL concepts most operations can be supported.

+
+ + + + + +
1"""
+2.. include:: ./README.md
+3"""
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dataframe/sql.html b/docs/sqlglot/dataframe/sql.html new file mode 100644 index 0000000..83f5418 --- /dev/null +++ b/docs/sqlglot/dataframe/sql.html @@ -0,0 +1,4953 @@ + + + + + + + sqlglot.dataframe.sql API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dataframe.sql

+ + + + + + +
 1from sqlglot.dataframe.sql.column import Column
+ 2from sqlglot.dataframe.sql.dataframe import DataFrame, DataFrameNaFunctions
+ 3from sqlglot.dataframe.sql.group import GroupedData
+ 4from sqlglot.dataframe.sql.readwriter import DataFrameReader, DataFrameWriter
+ 5from sqlglot.dataframe.sql.session import SparkSession
+ 6from sqlglot.dataframe.sql.window import Window, WindowSpec
+ 7
+ 8__all__ = [
+ 9    "SparkSession",
+10    "DataFrame",
+11    "GroupedData",
+12    "Column",
+13    "DataFrameNaFunctions",
+14    "Window",
+15    "WindowSpec",
+16    "DataFrameReader",
+17    "DataFrameWriter",
+18]
+
+ + +
+
+ +
+ + class + SparkSession: + + + +
+ +
 20class SparkSession:
+ 21    known_ids: t.ClassVar[t.Set[str]] = set()
+ 22    known_branch_ids: t.ClassVar[t.Set[str]] = set()
+ 23    known_sequence_ids: t.ClassVar[t.Set[str]] = set()
+ 24    name_to_sequence_id_mapping: t.ClassVar[t.Dict[str, t.List[str]]] = defaultdict(list)
+ 25
+ 26    def __init__(self):
+ 27        self.incrementing_id = 1
+ 28
+ 29    def __getattr__(self, name: str) -> SparkSession:
+ 30        return self
+ 31
+ 32    def __call__(self, *args, **kwargs) -> SparkSession:
+ 33        return self
+ 34
+ 35    @property
+ 36    def read(self) -> DataFrameReader:
+ 37        return DataFrameReader(self)
+ 38
+ 39    def table(self, tableName: str) -> DataFrame:
+ 40        return self.read.table(tableName)
+ 41
+ 42    def createDataFrame(
+ 43        self,
+ 44        data: t.Sequence[t.Union[t.Dict[str, ColumnLiterals], t.List[ColumnLiterals], t.Tuple]],
+ 45        schema: t.Optional[SchemaInput] = None,
+ 46        samplingRatio: t.Optional[float] = None,
+ 47        verifySchema: bool = False,
+ 48    ) -> DataFrame:
+ 49        from sqlglot.dataframe.sql.dataframe import DataFrame
+ 50
+ 51        if samplingRatio is not None or verifySchema:
+ 52            raise NotImplementedError("Sampling Ratio and Verify Schema are not supported")
+ 53        if schema is not None and (
+ 54            not isinstance(schema, (StructType, str, list))
+ 55            or (isinstance(schema, list) and not isinstance(schema[0], str))
+ 56        ):
+ 57            raise NotImplementedError("Only schema of either list or string of list supported")
+ 58        if not data:
+ 59            raise ValueError("Must provide data to create into a DataFrame")
+ 60
+ 61        column_mapping: t.Dict[str, t.Optional[str]]
+ 62        if schema is not None:
+ 63            column_mapping = get_column_mapping_from_schema_input(schema)
+ 64        elif isinstance(data[0], dict):
+ 65            column_mapping = {col_name.strip(): None for col_name in data[0]}
+ 66        else:
+ 67            column_mapping = {f"_{i}": None for i in range(1, len(data[0]) + 1)}
+ 68
+ 69        data_expressions = [
+ 70            exp.Tuple(
+ 71                expressions=list(
+ 72                    map(
+ 73                        lambda x: F.lit(x).expression,
+ 74                        row if not isinstance(row, dict) else row.values(),
+ 75                    )
+ 76                )
+ 77            )
+ 78            for row in data
+ 79        ]
+ 80
+ 81        sel_columns = [
+ 82            F.col(name).cast(data_type).alias(name).expression
+ 83            if data_type is not None
+ 84            else F.col(name).expression
+ 85            for name, data_type in column_mapping.items()
+ 86        ]
+ 87
+ 88        select_kwargs = {
+ 89            "expressions": sel_columns,
+ 90            "from": exp.From(
+ 91                expressions=[
+ 92                    exp.Values(
+ 93                        expressions=data_expressions,
+ 94                        alias=exp.TableAlias(
+ 95                            this=exp.to_identifier(self._auto_incrementing_name),
+ 96                            columns=[exp.to_identifier(col_name) for col_name in column_mapping],
+ 97                        ),
+ 98                    ),
+ 99                ],
+100            ),
+101        }
+102
+103        sel_expression = exp.Select(**select_kwargs)
+104        return DataFrame(self, sel_expression)
+105
+106    def sql(self, sqlQuery: str) -> DataFrame:
+107        expression = sqlglot.parse_one(sqlQuery, read="spark")
+108        if isinstance(expression, exp.Select):
+109            df = DataFrame(self, expression)
+110            df = df._convert_leaf_to_cte()
+111        elif isinstance(expression, (exp.Create, exp.Insert)):
+112            select_expression = expression.expression.copy()
+113            if isinstance(expression, exp.Insert):
+114                select_expression.set("with", expression.args.get("with"))
+115                expression.set("with", None)
+116            del expression.args["expression"]
+117            df = DataFrame(self, select_expression, output_expression_container=expression)  # type: ignore
+118            df = df._convert_leaf_to_cte()
+119        else:
+120            raise ValueError(
+121                "Unknown expression type provided in the SQL. Please create an issue with the SQL."
+122            )
+123        return df
+124
+125    @property
+126    def _auto_incrementing_name(self) -> str:
+127        name = f"a{self.incrementing_id}"
+128        self.incrementing_id += 1
+129        return name
+130
+131    @property
+132    def _random_name(self) -> str:
+133        return "r" + uuid.uuid4().hex
+134
+135    @property
+136    def _random_branch_id(self) -> str:
+137        id = self._random_id
+138        self.known_branch_ids.add(id)
+139        return id
+140
+141    @property
+142    def _random_sequence_id(self):
+143        id = self._random_id
+144        self.known_sequence_ids.add(id)
+145        return id
+146
+147    @property
+148    def _random_id(self) -> str:
+149        id = self._random_name
+150        self.known_ids.add(id)
+151        return id
+152
+153    @property
+154    def _join_hint_names(self) -> t.Set[str]:
+155        return {"BROADCAST", "MERGE", "SHUFFLE_HASH", "SHUFFLE_REPLICATE_NL"}
+156
+157    def _add_alias_to_mapping(self, name: str, sequence_id: str):
+158        self.name_to_sequence_id_mapping[name].append(sequence_id)
+
+ + + + +
+ +
+ + SparkSession() + + + +
+ +
26    def __init__(self):
+27        self.incrementing_id = 1
+
+ + + + +
+
+ +
+ + def + table(self, tableName: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
39    def table(self, tableName: str) -> DataFrame:
+40        return self.read.table(tableName)
+
+ + + + +
+
+ +
+ + def + createDataFrame( self, data: Sequence[Union[Dict[str, <MagicMock id='140700333898336'>], List[<MagicMock id='140700333898336'>], Tuple]], schema: Optional[<MagicMock id='140700333867312'>] = None, samplingRatio: Optional[float] = None, verifySchema: bool = False) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
 42    def createDataFrame(
+ 43        self,
+ 44        data: t.Sequence[t.Union[t.Dict[str, ColumnLiterals], t.List[ColumnLiterals], t.Tuple]],
+ 45        schema: t.Optional[SchemaInput] = None,
+ 46        samplingRatio: t.Optional[float] = None,
+ 47        verifySchema: bool = False,
+ 48    ) -> DataFrame:
+ 49        from sqlglot.dataframe.sql.dataframe import DataFrame
+ 50
+ 51        if samplingRatio is not None or verifySchema:
+ 52            raise NotImplementedError("Sampling Ratio and Verify Schema are not supported")
+ 53        if schema is not None and (
+ 54            not isinstance(schema, (StructType, str, list))
+ 55            or (isinstance(schema, list) and not isinstance(schema[0], str))
+ 56        ):
+ 57            raise NotImplementedError("Only schema of either list or string of list supported")
+ 58        if not data:
+ 59            raise ValueError("Must provide data to create into a DataFrame")
+ 60
+ 61        column_mapping: t.Dict[str, t.Optional[str]]
+ 62        if schema is not None:
+ 63            column_mapping = get_column_mapping_from_schema_input(schema)
+ 64        elif isinstance(data[0], dict):
+ 65            column_mapping = {col_name.strip(): None for col_name in data[0]}
+ 66        else:
+ 67            column_mapping = {f"_{i}": None for i in range(1, len(data[0]) + 1)}
+ 68
+ 69        data_expressions = [
+ 70            exp.Tuple(
+ 71                expressions=list(
+ 72                    map(
+ 73                        lambda x: F.lit(x).expression,
+ 74                        row if not isinstance(row, dict) else row.values(),
+ 75                    )
+ 76                )
+ 77            )
+ 78            for row in data
+ 79        ]
+ 80
+ 81        sel_columns = [
+ 82            F.col(name).cast(data_type).alias(name).expression
+ 83            if data_type is not None
+ 84            else F.col(name).expression
+ 85            for name, data_type in column_mapping.items()
+ 86        ]
+ 87
+ 88        select_kwargs = {
+ 89            "expressions": sel_columns,
+ 90            "from": exp.From(
+ 91                expressions=[
+ 92                    exp.Values(
+ 93                        expressions=data_expressions,
+ 94                        alias=exp.TableAlias(
+ 95                            this=exp.to_identifier(self._auto_incrementing_name),
+ 96                            columns=[exp.to_identifier(col_name) for col_name in column_mapping],
+ 97                        ),
+ 98                    ),
+ 99                ],
+100            ),
+101        }
+102
+103        sel_expression = exp.Select(**select_kwargs)
+104        return DataFrame(self, sel_expression)
+
+ + + + +
+
+ +
+ + def + sql(self, sqlQuery: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
106    def sql(self, sqlQuery: str) -> DataFrame:
+107        expression = sqlglot.parse_one(sqlQuery, read="spark")
+108        if isinstance(expression, exp.Select):
+109            df = DataFrame(self, expression)
+110            df = df._convert_leaf_to_cte()
+111        elif isinstance(expression, (exp.Create, exp.Insert)):
+112            select_expression = expression.expression.copy()
+113            if isinstance(expression, exp.Insert):
+114                select_expression.set("with", expression.args.get("with"))
+115                expression.set("with", None)
+116            del expression.args["expression"]
+117            df = DataFrame(self, select_expression, output_expression_container=expression)  # type: ignore
+118            df = df._convert_leaf_to_cte()
+119        else:
+120            raise ValueError(
+121                "Unknown expression type provided in the SQL. Please create an issue with the SQL."
+122            )
+123        return df
+
+ + + + +
+
+
+ +
+ + class + DataFrame: + + + +
+ +
 46class DataFrame:
+ 47    def __init__(
+ 48        self,
+ 49        spark: SparkSession,
+ 50        expression: exp.Select,
+ 51        branch_id: t.Optional[str] = None,
+ 52        sequence_id: t.Optional[str] = None,
+ 53        last_op: Operation = Operation.INIT,
+ 54        pending_hints: t.Optional[t.List[exp.Expression]] = None,
+ 55        output_expression_container: t.Optional[OutputExpressionContainer] = None,
+ 56        **kwargs,
+ 57    ):
+ 58        self.spark = spark
+ 59        self.expression = expression
+ 60        self.branch_id = branch_id or self.spark._random_branch_id
+ 61        self.sequence_id = sequence_id or self.spark._random_sequence_id
+ 62        self.last_op = last_op
+ 63        self.pending_hints = pending_hints or []
+ 64        self.output_expression_container = output_expression_container or exp.Select()
+ 65
+ 66    def __getattr__(self, column_name: str) -> Column:
+ 67        return self[column_name]
+ 68
+ 69    def __getitem__(self, column_name: str) -> Column:
+ 70        column_name = f"{self.branch_id}.{column_name}"
+ 71        return Column(column_name)
+ 72
+ 73    def __copy__(self):
+ 74        return self.copy()
+ 75
+ 76    @property
+ 77    def sparkSession(self):
+ 78        return self.spark
+ 79
+ 80    @property
+ 81    def write(self):
+ 82        return DataFrameWriter(self)
+ 83
+ 84    @property
+ 85    def latest_cte_name(self) -> str:
+ 86        if not self.expression.ctes:
+ 87            from_exp = self.expression.args["from"]
+ 88            if from_exp.alias_or_name:
+ 89                return from_exp.alias_or_name
+ 90            table_alias = from_exp.find(exp.TableAlias)
+ 91            if not table_alias:
+ 92                raise RuntimeError(
+ 93                    f"Could not find an alias name for this expression: {self.expression}"
+ 94                )
+ 95            return table_alias.alias_or_name
+ 96        return self.expression.ctes[-1].alias
+ 97
+ 98    @property
+ 99    def pending_join_hints(self):
+100        return [hint for hint in self.pending_hints if isinstance(hint, exp.JoinHint)]
+101
+102    @property
+103    def pending_partition_hints(self):
+104        return [hint for hint in self.pending_hints if isinstance(hint, exp.Anonymous)]
+105
+106    @property
+107    def columns(self) -> t.List[str]:
+108        return self.expression.named_selects
+109
+110    @property
+111    def na(self) -> DataFrameNaFunctions:
+112        return DataFrameNaFunctions(self)
+113
+114    def _replace_cte_names_with_hashes(self, expression: exp.Select):
+115        replacement_mapping = {}
+116        for cte in expression.ctes:
+117            old_name_id = cte.args["alias"].this
+118            new_hashed_id = exp.to_identifier(
+119                self._create_hash_from_expression(cte.this), quoted=old_name_id.args["quoted"]
+120            )
+121            replacement_mapping[old_name_id] = new_hashed_id
+122            expression = expression.transform(replace_id_value, replacement_mapping)
+123        return expression
+124
+125    def _create_cte_from_expression(
+126        self,
+127        expression: exp.Expression,
+128        branch_id: t.Optional[str] = None,
+129        sequence_id: t.Optional[str] = None,
+130        **kwargs,
+131    ) -> t.Tuple[exp.CTE, str]:
+132        name = self.spark._random_name
+133        expression_to_cte = expression.copy()
+134        expression_to_cte.set("with", None)
+135        cte = exp.Select().with_(name, as_=expression_to_cte, **kwargs).ctes[0]
+136        cte.set("branch_id", branch_id or self.branch_id)
+137        cte.set("sequence_id", sequence_id or self.sequence_id)
+138        return cte, name
+139
+140    @t.overload
+141    def _ensure_list_of_columns(self, cols: t.Collection[ColumnOrLiteral]) -> t.List[Column]:
+142        ...
+143
+144    @t.overload
+145    def _ensure_list_of_columns(self, cols: ColumnOrLiteral) -> t.List[Column]:
+146        ...
+147
+148    def _ensure_list_of_columns(self, cols):
+149        return Column.ensure_cols(ensure_list(cols))
+150
+151    def _ensure_and_normalize_cols(self, cols):
+152        cols = self._ensure_list_of_columns(cols)
+153        normalize(self.spark, self.expression, cols)
+154        return cols
+155
+156    def _ensure_and_normalize_col(self, col):
+157        col = Column.ensure_col(col)
+158        normalize(self.spark, self.expression, col)
+159        return col
+160
+161    def _convert_leaf_to_cte(self, sequence_id: t.Optional[str] = None) -> DataFrame:
+162        df = self._resolve_pending_hints()
+163        sequence_id = sequence_id or df.sequence_id
+164        expression = df.expression.copy()
+165        cte_expression, cte_name = df._create_cte_from_expression(
+166            expression=expression, sequence_id=sequence_id
+167        )
+168        new_expression = df._add_ctes_to_expression(
+169            exp.Select(), expression.ctes + [cte_expression]
+170        )
+171        sel_columns = df._get_outer_select_columns(cte_expression)
+172        new_expression = new_expression.from_(cte_name).select(
+173            *[x.alias_or_name for x in sel_columns]
+174        )
+175        return df.copy(expression=new_expression, sequence_id=sequence_id)
+176
+177    def _resolve_pending_hints(self) -> DataFrame:
+178        df = self.copy()
+179        if not self.pending_hints:
+180            return df
+181        expression = df.expression
+182        hint_expression = expression.args.get("hint") or exp.Hint(expressions=[])
+183        for hint in df.pending_partition_hints:
+184            hint_expression.append("expressions", hint)
+185            df.pending_hints.remove(hint)
+186
+187        join_aliases = {
+188            join_table.alias_or_name
+189            for join_table in get_tables_from_expression_with_join(expression)
+190        }
+191        if join_aliases:
+192            for hint in df.pending_join_hints:
+193                for sequence_id_expression in hint.expressions:
+194                    sequence_id_or_name = sequence_id_expression.alias_or_name
+195                    sequence_ids_to_match = [sequence_id_or_name]
+196                    if sequence_id_or_name in df.spark.name_to_sequence_id_mapping:
+197                        sequence_ids_to_match = df.spark.name_to_sequence_id_mapping[
+198                            sequence_id_or_name
+199                        ]
+200                    matching_ctes = [
+201                        cte
+202                        for cte in reversed(expression.ctes)
+203                        if cte.args["sequence_id"] in sequence_ids_to_match
+204                    ]
+205                    for matching_cte in matching_ctes:
+206                        if matching_cte.alias_or_name in join_aliases:
+207                            sequence_id_expression.set("this", matching_cte.args["alias"].this)
+208                            df.pending_hints.remove(hint)
+209                            break
+210                hint_expression.append("expressions", hint)
+211        if hint_expression.expressions:
+212            expression.set("hint", hint_expression)
+213        return df
+214
+215    def _hint(self, hint_name: str, args: t.List[Column]) -> DataFrame:
+216        hint_name = hint_name.upper()
+217        hint_expression = (
+218            exp.JoinHint(
+219                this=hint_name,
+220                expressions=[exp.to_table(parameter.alias_or_name) for parameter in args],
+221            )
+222            if hint_name in JOIN_HINTS
+223            else exp.Anonymous(
+224                this=hint_name, expressions=[parameter.expression for parameter in args]
+225            )
+226        )
+227        new_df = self.copy()
+228        new_df.pending_hints.append(hint_expression)
+229        return new_df
+230
+231    def _set_operation(self, klass: t.Callable, other: DataFrame, distinct: bool):
+232        other_df = other._convert_leaf_to_cte()
+233        base_expression = self.expression.copy()
+234        base_expression = self._add_ctes_to_expression(base_expression, other_df.expression.ctes)
+235        all_ctes = base_expression.ctes
+236        other_df.expression.set("with", None)
+237        base_expression.set("with", None)
+238        operation = klass(this=base_expression, distinct=distinct, expression=other_df.expression)
+239        operation.set("with", exp.With(expressions=all_ctes))
+240        return self.copy(expression=operation)._convert_leaf_to_cte()
+241
+242    def _cache(self, storage_level: str):
+243        df = self._convert_leaf_to_cte()
+244        df.expression.ctes[-1].set("cache_storage_level", storage_level)
+245        return df
+246
+247    @classmethod
+248    def _add_ctes_to_expression(cls, expression: exp.Select, ctes: t.List[exp.CTE]) -> exp.Select:
+249        expression = expression.copy()
+250        with_expression = expression.args.get("with")
+251        if with_expression:
+252            existing_ctes = with_expression.expressions
+253            existsing_cte_names = {x.alias_or_name for x in existing_ctes}
+254            for cte in ctes:
+255                if cte.alias_or_name not in existsing_cte_names:
+256                    existing_ctes.append(cte)
+257        else:
+258            existing_ctes = ctes
+259        expression.set("with", exp.With(expressions=existing_ctes))
+260        return expression
+261
+262    @classmethod
+263    def _get_outer_select_columns(cls, item: t.Union[exp.Expression, DataFrame]) -> t.List[Column]:
+264        expression = item.expression if isinstance(item, DataFrame) else item
+265        return [Column(x) for x in expression.find(exp.Select).expressions]
+266
+267    @classmethod
+268    def _create_hash_from_expression(cls, expression: exp.Select):
+269        value = expression.sql(dialect="spark").encode("utf-8")
+270        return f"t{zlib.crc32(value)}"[:6]
+271
+272    def _get_select_expressions(
+273        self,
+274    ) -> t.List[t.Tuple[t.Union[t.Type[exp.Cache], OutputExpressionContainer], exp.Select]]:
+275        select_expressions: t.List[
+276            t.Tuple[t.Union[t.Type[exp.Cache], OutputExpressionContainer], exp.Select]
+277        ] = []
+278        main_select_ctes: t.List[exp.CTE] = []
+279        for cte in self.expression.ctes:
+280            cache_storage_level = cte.args.get("cache_storage_level")
+281            if cache_storage_level:
+282                select_expression = cte.this.copy()
+283                select_expression.set("with", exp.With(expressions=copy(main_select_ctes)))
+284                select_expression.set("cte_alias_name", cte.alias_or_name)
+285                select_expression.set("cache_storage_level", cache_storage_level)
+286                select_expressions.append((exp.Cache, select_expression))
+287            else:
+288                main_select_ctes.append(cte)
+289        main_select = self.expression.copy()
+290        if main_select_ctes:
+291            main_select.set("with", exp.With(expressions=main_select_ctes))
+292        expression_select_pair = (type(self.output_expression_container), main_select)
+293        select_expressions.append(expression_select_pair)  # type: ignore
+294        return select_expressions
+295
+296    def sql(self, dialect="spark", optimize=True, **kwargs) -> t.List[str]:
+297        df = self._resolve_pending_hints()
+298        select_expressions = df._get_select_expressions()
+299        output_expressions: t.List[t.Union[exp.Select, exp.Cache, exp.Drop]] = []
+300        replacement_mapping: t.Dict[exp.Identifier, exp.Identifier] = {}
+301        for expression_type, select_expression in select_expressions:
+302            select_expression = select_expression.transform(replace_id_value, replacement_mapping)
+303            if optimize:
+304                select_expression = optimize_func(select_expression)
+305            select_expression = df._replace_cte_names_with_hashes(select_expression)
+306            expression: t.Union[exp.Select, exp.Cache, exp.Drop]
+307            if expression_type == exp.Cache:
+308                cache_table_name = df._create_hash_from_expression(select_expression)
+309                cache_table = exp.to_table(cache_table_name)
+310                original_alias_name = select_expression.args["cte_alias_name"]
+311
+312                replacement_mapping[exp.to_identifier(original_alias_name)] = exp.to_identifier(  # type: ignore
+313                    cache_table_name
+314                )
+315                sqlglot.schema.add_table(
+316                    cache_table_name,
+317                    {
+318                        expression.alias_or_name: expression.type.sql("spark")
+319                        for expression in select_expression.expressions
+320                    },
+321                )
+322                cache_storage_level = select_expression.args["cache_storage_level"]
+323                options = [
+324                    exp.Literal.string("storageLevel"),
+325                    exp.Literal.string(cache_storage_level),
+326                ]
+327                expression = exp.Cache(
+328                    this=cache_table, expression=select_expression, lazy=True, options=options
+329                )
+330                # We will drop the "view" if it exists before running the cache table
+331                output_expressions.append(exp.Drop(this=cache_table, exists=True, kind="VIEW"))
+332            elif expression_type == exp.Create:
+333                expression = df.output_expression_container.copy()
+334                expression.set("expression", select_expression)
+335            elif expression_type == exp.Insert:
+336                expression = df.output_expression_container.copy()
+337                select_without_ctes = select_expression.copy()
+338                select_without_ctes.set("with", None)
+339                expression.set("expression", select_without_ctes)
+340                if select_expression.ctes:
+341                    expression.set("with", exp.With(expressions=select_expression.ctes))
+342            elif expression_type == exp.Select:
+343                expression = select_expression
+344            else:
+345                raise ValueError(f"Invalid expression type: {expression_type}")
+346            output_expressions.append(expression)
+347
+348        return [
+349            expression.sql(**{"dialect": dialect, **kwargs}) for expression in output_expressions
+350        ]
+351
+352    def copy(self, **kwargs) -> DataFrame:
+353        return DataFrame(**object_to_dict(self, **kwargs))
+354
+355    @operation(Operation.SELECT)
+356    def select(self, *cols, **kwargs) -> DataFrame:
+357        cols = self._ensure_and_normalize_cols(cols)
+358        kwargs["append"] = kwargs.get("append", False)
+359        if self.expression.args.get("joins"):
+360            ambiguous_cols = [col for col in cols if not col.column_expression.table]
+361            if ambiguous_cols:
+362                join_table_identifiers = [
+363                    x.this for x in get_tables_from_expression_with_join(self.expression)
+364                ]
+365                cte_names_in_join = [x.this for x in join_table_identifiers]
+366                for ambiguous_col in ambiguous_cols:
+367                    ctes_with_column = [
+368                        cte
+369                        for cte in self.expression.ctes
+370                        if cte.alias_or_name in cte_names_in_join
+371                        and ambiguous_col.alias_or_name in cte.this.named_selects
+372                    ]
+373                    # If the select column does not specify a table and there is a join
+374                    # then we assume they are referring to the left table
+375                    if len(ctes_with_column) > 1:
+376                        table_identifier = self.expression.args["from"].args["expressions"][0].this
+377                    else:
+378                        table_identifier = ctes_with_column[0].args["alias"].this
+379                    ambiguous_col.expression.set("table", table_identifier)
+380        expression = self.expression.select(*[x.expression for x in cols], **kwargs)
+381        qualify_columns(expression, sqlglot.schema)
+382        return self.copy(expression=expression, **kwargs)
+383
+384    @operation(Operation.NO_OP)
+385    def alias(self, name: str, **kwargs) -> DataFrame:
+386        new_sequence_id = self.spark._random_sequence_id
+387        df = self.copy()
+388        for join_hint in df.pending_join_hints:
+389            for expression in join_hint.expressions:
+390                if expression.alias_or_name == self.sequence_id:
+391                    expression.set("this", Column.ensure_col(new_sequence_id).expression)
+392        df.spark._add_alias_to_mapping(name, new_sequence_id)
+393        return df._convert_leaf_to_cte(sequence_id=new_sequence_id)
+394
+395    @operation(Operation.WHERE)
+396    def where(self, column: t.Union[Column, bool], **kwargs) -> DataFrame:
+397        col = self._ensure_and_normalize_col(column)
+398        return self.copy(expression=self.expression.where(col.expression))
+399
+400    filter = where
+401
+402    @operation(Operation.GROUP_BY)
+403    def groupBy(self, *cols, **kwargs) -> GroupedData:
+404        columns = self._ensure_and_normalize_cols(cols)
+405        return GroupedData(self, columns, self.last_op)
+406
+407    @operation(Operation.SELECT)
+408    def agg(self, *exprs, **kwargs) -> DataFrame:
+409        cols = self._ensure_and_normalize_cols(exprs)
+410        return self.groupBy().agg(*cols)
+411
+412    @operation(Operation.FROM)
+413    def join(
+414        self,
+415        other_df: DataFrame,
+416        on: t.Union[str, t.List[str], Column, t.List[Column]],
+417        how: str = "inner",
+418        **kwargs,
+419    ) -> DataFrame:
+420        other_df = other_df._convert_leaf_to_cte()
+421        pre_join_self_latest_cte_name = self.latest_cte_name
+422        columns = self._ensure_and_normalize_cols(on)
+423        join_type = how.replace("_", " ")
+424        if isinstance(columns[0].expression, exp.Column):
+425            join_columns = [
+426                Column(x).set_table_name(pre_join_self_latest_cte_name) for x in columns
+427            ]
+428            join_clause = functools.reduce(
+429                lambda x, y: x & y,
+430                [
+431                    col.copy().set_table_name(pre_join_self_latest_cte_name)
+432                    == col.copy().set_table_name(other_df.latest_cte_name)
+433                    for col in columns
+434                ],
+435            )
+436        else:
+437            if len(columns) > 1:
+438                columns = [functools.reduce(lambda x, y: x & y, columns)]
+439            join_clause = columns[0]
+440            join_columns = [
+441                Column(x).set_table_name(pre_join_self_latest_cte_name)
+442                if i % 2 == 0
+443                else Column(x).set_table_name(other_df.latest_cte_name)
+444                for i, x in enumerate(join_clause.expression.find_all(exp.Column))
+445            ]
+446        self_columns = [
+447            column.set_table_name(pre_join_self_latest_cte_name, copy=True)
+448            for column in self._get_outer_select_columns(self)
+449        ]
+450        other_columns = [
+451            column.set_table_name(other_df.latest_cte_name, copy=True)
+452            for column in self._get_outer_select_columns(other_df)
+453        ]
+454        column_value_mapping = {
+455            column.alias_or_name
+456            if not isinstance(column.expression.this, exp.Star)
+457            else column.sql(): column
+458            for column in other_columns + self_columns + join_columns
+459        }
+460        all_columns = [
+461            column_value_mapping[name]
+462            for name in {x.alias_or_name: None for x in join_columns + self_columns + other_columns}
+463        ]
+464        new_df = self.copy(
+465            expression=self.expression.join(
+466                other_df.latest_cte_name, on=join_clause.expression, join_type=join_type
+467            )
+468        )
+469        new_df.expression = new_df._add_ctes_to_expression(
+470            new_df.expression, other_df.expression.ctes
+471        )
+472        new_df.pending_hints.extend(other_df.pending_hints)
+473        new_df = new_df.select.__wrapped__(new_df, *all_columns)
+474        return new_df
+475
+476    @operation(Operation.ORDER_BY)
+477    def orderBy(
+478        self,
+479        *cols: t.Union[str, Column],
+480        ascending: t.Optional[t.Union[t.Any, t.List[t.Any]]] = None,
+481    ) -> DataFrame:
+482        """
+483        This implementation lets any ordered columns take priority over whatever is provided in `ascending`. Spark
+484        has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this
+485        is unlikely to come up.
+486        """
+487        columns = self._ensure_and_normalize_cols(cols)
+488        pre_ordered_col_indexes = [
+489            x
+490            for x in [
+491                i if isinstance(col.expression, exp.Ordered) else None
+492                for i, col in enumerate(columns)
+493            ]
+494            if x is not None
+495        ]
+496        if ascending is None:
+497            ascending = [True] * len(columns)
+498        elif not isinstance(ascending, list):
+499            ascending = [ascending] * len(columns)
+500        ascending = [bool(x) for i, x in enumerate(ascending)]
+501        assert len(columns) == len(
+502            ascending
+503        ), "The length of items in ascending must equal the number of columns provided"
+504        col_and_ascending = list(zip(columns, ascending))
+505        order_by_columns = [
+506            exp.Ordered(this=col.expression, desc=not asc)
+507            if i not in pre_ordered_col_indexes
+508            else columns[i].column_expression
+509            for i, (col, asc) in enumerate(col_and_ascending)
+510        ]
+511        return self.copy(expression=self.expression.order_by(*order_by_columns))
+512
+513    sort = orderBy
+514
+515    @operation(Operation.FROM)
+516    def union(self, other: DataFrame) -> DataFrame:
+517        return self._set_operation(exp.Union, other, False)
+518
+519    unionAll = union
+520
+521    @operation(Operation.FROM)
+522    def unionByName(self, other: DataFrame, allowMissingColumns: bool = False):
+523        l_columns = self.columns
+524        r_columns = other.columns
+525        if not allowMissingColumns:
+526            l_expressions = l_columns
+527            r_expressions = l_columns
+528        else:
+529            l_expressions = []
+530            r_expressions = []
+531            r_columns_unused = copy(r_columns)
+532            for l_column in l_columns:
+533                l_expressions.append(l_column)
+534                if l_column in r_columns:
+535                    r_expressions.append(l_column)
+536                    r_columns_unused.remove(l_column)
+537                else:
+538                    r_expressions.append(exp.alias_(exp.Null(), l_column))
+539            for r_column in r_columns_unused:
+540                l_expressions.append(exp.alias_(exp.Null(), r_column))
+541                r_expressions.append(r_column)
+542        r_df = (
+543            other.copy()._convert_leaf_to_cte().select(*self._ensure_list_of_columns(r_expressions))
+544        )
+545        l_df = self.copy()
+546        if allowMissingColumns:
+547            l_df = l_df._convert_leaf_to_cte().select(*self._ensure_list_of_columns(l_expressions))
+548        return l_df._set_operation(exp.Union, r_df, False)
+549
+550    @operation(Operation.FROM)
+551    def intersect(self, other: DataFrame) -> DataFrame:
+552        return self._set_operation(exp.Intersect, other, True)
+553
+554    @operation(Operation.FROM)
+555    def intersectAll(self, other: DataFrame) -> DataFrame:
+556        return self._set_operation(exp.Intersect, other, False)
+557
+558    @operation(Operation.FROM)
+559    def exceptAll(self, other: DataFrame) -> DataFrame:
+560        return self._set_operation(exp.Except, other, False)
+561
+562    @operation(Operation.SELECT)
+563    def distinct(self) -> DataFrame:
+564        return self.copy(expression=self.expression.distinct())
+565
+566    @operation(Operation.SELECT)
+567    def dropDuplicates(self, subset: t.Optional[t.List[str]] = None):
+568        if not subset:
+569            return self.distinct()
+570        column_names = ensure_list(subset)
+571        window = Window.partitionBy(*column_names).orderBy(*column_names)
+572        return (
+573            self.copy()
+574            .withColumn("row_num", F.row_number().over(window))
+575            .where(F.col("row_num") == F.lit(1))
+576            .drop("row_num")
+577        )
+578
+579    @operation(Operation.FROM)
+580    def dropna(
+581        self,
+582        how: str = "any",
+583        thresh: t.Optional[int] = None,
+584        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+585    ) -> DataFrame:
+586        minimum_non_null = thresh or 0  # will be determined later if thresh is null
+587        new_df = self.copy()
+588        all_columns = self._get_outer_select_columns(new_df.expression)
+589        if subset:
+590            null_check_columns = self._ensure_and_normalize_cols(subset)
+591        else:
+592            null_check_columns = all_columns
+593        if thresh is None:
+594            minimum_num_nulls = 1 if how == "any" else len(null_check_columns)
+595        else:
+596            minimum_num_nulls = len(null_check_columns) - minimum_non_null + 1
+597        if minimum_num_nulls > len(null_check_columns):
+598            raise RuntimeError(
+599                f"The minimum num nulls for dropna must be less than or equal to the number of columns. "
+600                f"Minimum num nulls: {minimum_num_nulls}, Num Columns: {len(null_check_columns)}"
+601            )
+602        if_null_checks = [
+603            F.when(column.isNull(), F.lit(1)).otherwise(F.lit(0)) for column in null_check_columns
+604        ]
+605        nulls_added_together = functools.reduce(lambda x, y: x + y, if_null_checks)
+606        num_nulls = nulls_added_together.alias("num_nulls")
+607        new_df = new_df.select(num_nulls, append=True)
+608        filtered_df = new_df.where(F.col("num_nulls") < F.lit(minimum_num_nulls))
+609        final_df = filtered_df.select(*all_columns)
+610        return final_df
+611
+612    @operation(Operation.FROM)
+613    def fillna(
+614        self,
+615        value: t.Union[ColumnLiterals],
+616        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+617    ) -> DataFrame:
+618        """
+619        Functionality Difference: If you provide a value to replace a null and that type conflicts
+620        with the type of the column then PySpark will just ignore your replacement.
+621        This will try to cast them to be the same in some cases. So they won't always match.
+622        Best to not mix types so make sure replacement is the same type as the column
+623
+624        Possibility for improvement: Use `typeof` function to get the type of the column
+625        and check if it matches the type of the value provided. If not then make it null.
+626        """
+627        from sqlglot.dataframe.sql.functions import lit
+628
+629        values = None
+630        columns = None
+631        new_df = self.copy()
+632        all_columns = self._get_outer_select_columns(new_df.expression)
+633        all_column_mapping = {column.alias_or_name: column for column in all_columns}
+634        if isinstance(value, dict):
+635            values = list(value.values())
+636            columns = self._ensure_and_normalize_cols(list(value))
+637        if not columns:
+638            columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
+639        if not values:
+640            values = [value] * len(columns)
+641        value_columns = [lit(value) for value in values]
+642
+643        null_replacement_mapping = {
+644            column.alias_or_name: (
+645                F.when(column.isNull(), value).otherwise(column).alias(column.alias_or_name)
+646            )
+647            for column, value in zip(columns, value_columns)
+648        }
+649        null_replacement_mapping = {**all_column_mapping, **null_replacement_mapping}
+650        null_replacement_columns = [
+651            null_replacement_mapping[column.alias_or_name] for column in all_columns
+652        ]
+653        new_df = new_df.select(*null_replacement_columns)
+654        return new_df
+655
+656    @operation(Operation.FROM)
+657    def replace(
+658        self,
+659        to_replace: t.Union[bool, int, float, str, t.List, t.Dict],
+660        value: t.Optional[t.Union[bool, int, float, str, t.List]] = None,
+661        subset: t.Optional[t.Collection[ColumnOrName] | ColumnOrName] = None,
+662    ) -> DataFrame:
+663        from sqlglot.dataframe.sql.functions import lit
+664
+665        old_values = None
+666        new_df = self.copy()
+667        all_columns = self._get_outer_select_columns(new_df.expression)
+668        all_column_mapping = {column.alias_or_name: column for column in all_columns}
+669
+670        columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
+671        if isinstance(to_replace, dict):
+672            old_values = list(to_replace)
+673            new_values = list(to_replace.values())
+674        elif not old_values and isinstance(to_replace, list):
+675            assert isinstance(value, list), "value must be a list since the replacements are a list"
+676            assert len(to_replace) == len(
+677                value
+678            ), "the replacements and values must be the same length"
+679            old_values = to_replace
+680            new_values = value
+681        else:
+682            old_values = [to_replace] * len(columns)
+683            new_values = [value] * len(columns)
+684        old_values = [lit(value) for value in old_values]
+685        new_values = [lit(value) for value in new_values]
+686
+687        replacement_mapping = {}
+688        for column in columns:
+689            expression = Column(None)
+690            for i, (old_value, new_value) in enumerate(zip(old_values, new_values)):
+691                if i == 0:
+692                    expression = F.when(column == old_value, new_value)
+693                else:
+694                    expression = expression.when(column == old_value, new_value)  # type: ignore
+695            replacement_mapping[column.alias_or_name] = expression.otherwise(column).alias(
+696                column.expression.alias_or_name
+697            )
+698
+699        replacement_mapping = {**all_column_mapping, **replacement_mapping}
+700        replacement_columns = [replacement_mapping[column.alias_or_name] for column in all_columns]
+701        new_df = new_df.select(*replacement_columns)
+702        return new_df
+703
+704    @operation(Operation.SELECT)
+705    def withColumn(self, colName: str, col: Column) -> DataFrame:
+706        col = self._ensure_and_normalize_col(col)
+707        existing_col_names = self.expression.named_selects
+708        existing_col_index = (
+709            existing_col_names.index(colName) if colName in existing_col_names else None
+710        )
+711        if existing_col_index:
+712            expression = self.expression.copy()
+713            expression.expressions[existing_col_index] = col.expression
+714            return self.copy(expression=expression)
+715        return self.copy().select(col.alias(colName), append=True)
+716
+717    @operation(Operation.SELECT)
+718    def withColumnRenamed(self, existing: str, new: str):
+719        expression = self.expression.copy()
+720        existing_columns = [
+721            expression
+722            for expression in expression.expressions
+723            if expression.alias_or_name == existing
+724        ]
+725        if not existing_columns:
+726            raise ValueError("Tried to rename a column that doesn't exist")
+727        for existing_column in existing_columns:
+728            if isinstance(existing_column, exp.Column):
+729                existing_column.replace(exp.alias_(existing_column.copy(), new))
+730            else:
+731                existing_column.set("alias", exp.to_identifier(new))
+732        return self.copy(expression=expression)
+733
+734    @operation(Operation.SELECT)
+735    def drop(self, *cols: t.Union[str, Column]) -> DataFrame:
+736        all_columns = self._get_outer_select_columns(self.expression)
+737        drop_cols = self._ensure_and_normalize_cols(cols)
+738        new_columns = [
+739            col
+740            for col in all_columns
+741            if col.alias_or_name not in [drop_column.alias_or_name for drop_column in drop_cols]
+742        ]
+743        return self.copy().select(*new_columns, append=False)
+744
+745    @operation(Operation.LIMIT)
+746    def limit(self, num: int) -> DataFrame:
+747        return self.copy(expression=self.expression.limit(num))
+748
+749    @operation(Operation.NO_OP)
+750    def hint(self, name: str, *parameters: t.Optional[t.Union[str, int]]) -> DataFrame:
+751        parameter_list = ensure_list(parameters)
+752        parameter_columns = (
+753            self._ensure_list_of_columns(parameter_list)
+754            if parameters
+755            else Column.ensure_cols([self.sequence_id])
+756        )
+757        return self._hint(name, parameter_columns)
+758
+759    @operation(Operation.NO_OP)
+760    def repartition(
+761        self, numPartitions: t.Union[int, ColumnOrName], *cols: ColumnOrName
+762    ) -> DataFrame:
+763        num_partition_cols = self._ensure_list_of_columns(numPartitions)
+764        columns = self._ensure_and_normalize_cols(cols)
+765        args = num_partition_cols + columns
+766        return self._hint("repartition", args)
+767
+768    @operation(Operation.NO_OP)
+769    def coalesce(self, numPartitions: int) -> DataFrame:
+770        num_partitions = Column.ensure_cols([numPartitions])
+771        return self._hint("coalesce", num_partitions)
+772
+773    @operation(Operation.NO_OP)
+774    def cache(self) -> DataFrame:
+775        return self._cache(storage_level="MEMORY_AND_DISK")
+776
+777    @operation(Operation.NO_OP)
+778    def persist(self, storageLevel: str = "MEMORY_AND_DISK_SER") -> DataFrame:
+779        """
+780        Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html
+781        """
+782        return self._cache(storageLevel)
+
+ + + + +
+ +
+ + DataFrame( spark: <MagicMock id='140700332957056'>, expression: sqlglot.expressions.Select, branch_id: Optional[str] = None, sequence_id: Optional[str] = None, last_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>, pending_hints: Optional[List[sqlglot.expressions.Expression]] = None, output_expression_container: Optional[<MagicMock id='140700332981504'>] = None, **kwargs) + + + +
+ +
47    def __init__(
+48        self,
+49        spark: SparkSession,
+50        expression: exp.Select,
+51        branch_id: t.Optional[str] = None,
+52        sequence_id: t.Optional[str] = None,
+53        last_op: Operation = Operation.INIT,
+54        pending_hints: t.Optional[t.List[exp.Expression]] = None,
+55        output_expression_container: t.Optional[OutputExpressionContainer] = None,
+56        **kwargs,
+57    ):
+58        self.spark = spark
+59        self.expression = expression
+60        self.branch_id = branch_id or self.spark._random_branch_id
+61        self.sequence_id = sequence_id or self.spark._random_sequence_id
+62        self.last_op = last_op
+63        self.pending_hints = pending_hints or []
+64        self.output_expression_container = output_expression_container or exp.Select()
+
+ + + + +
+
+ +
+ + def + sql(self, dialect='spark', optimize=True, **kwargs) -> List[str]: + + + +
+ +
296    def sql(self, dialect="spark", optimize=True, **kwargs) -> t.List[str]:
+297        df = self._resolve_pending_hints()
+298        select_expressions = df._get_select_expressions()
+299        output_expressions: t.List[t.Union[exp.Select, exp.Cache, exp.Drop]] = []
+300        replacement_mapping: t.Dict[exp.Identifier, exp.Identifier] = {}
+301        for expression_type, select_expression in select_expressions:
+302            select_expression = select_expression.transform(replace_id_value, replacement_mapping)
+303            if optimize:
+304                select_expression = optimize_func(select_expression)
+305            select_expression = df._replace_cte_names_with_hashes(select_expression)
+306            expression: t.Union[exp.Select, exp.Cache, exp.Drop]
+307            if expression_type == exp.Cache:
+308                cache_table_name = df._create_hash_from_expression(select_expression)
+309                cache_table = exp.to_table(cache_table_name)
+310                original_alias_name = select_expression.args["cte_alias_name"]
+311
+312                replacement_mapping[exp.to_identifier(original_alias_name)] = exp.to_identifier(  # type: ignore
+313                    cache_table_name
+314                )
+315                sqlglot.schema.add_table(
+316                    cache_table_name,
+317                    {
+318                        expression.alias_or_name: expression.type.sql("spark")
+319                        for expression in select_expression.expressions
+320                    },
+321                )
+322                cache_storage_level = select_expression.args["cache_storage_level"]
+323                options = [
+324                    exp.Literal.string("storageLevel"),
+325                    exp.Literal.string(cache_storage_level),
+326                ]
+327                expression = exp.Cache(
+328                    this=cache_table, expression=select_expression, lazy=True, options=options
+329                )
+330                # We will drop the "view" if it exists before running the cache table
+331                output_expressions.append(exp.Drop(this=cache_table, exists=True, kind="VIEW"))
+332            elif expression_type == exp.Create:
+333                expression = df.output_expression_container.copy()
+334                expression.set("expression", select_expression)
+335            elif expression_type == exp.Insert:
+336                expression = df.output_expression_container.copy()
+337                select_without_ctes = select_expression.copy()
+338                select_without_ctes.set("with", None)
+339                expression.set("expression", select_without_ctes)
+340                if select_expression.ctes:
+341                    expression.set("with", exp.With(expressions=select_expression.ctes))
+342            elif expression_type == exp.Select:
+343                expression = select_expression
+344            else:
+345                raise ValueError(f"Invalid expression type: {expression_type}")
+346            output_expressions.append(expression)
+347
+348        return [
+349            expression.sql(**{"dialect": dialect, **kwargs}) for expression in output_expressions
+350        ]
+
+ + + + +
+
+ +
+ + def + copy(self, **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
352    def copy(self, **kwargs) -> DataFrame:
+353        return DataFrame(**object_to_dict(self, **kwargs))
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + select(self, *cols, **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
355    @operation(Operation.SELECT)
+356    def select(self, *cols, **kwargs) -> DataFrame:
+357        cols = self._ensure_and_normalize_cols(cols)
+358        kwargs["append"] = kwargs.get("append", False)
+359        if self.expression.args.get("joins"):
+360            ambiguous_cols = [col for col in cols if not col.column_expression.table]
+361            if ambiguous_cols:
+362                join_table_identifiers = [
+363                    x.this for x in get_tables_from_expression_with_join(self.expression)
+364                ]
+365                cte_names_in_join = [x.this for x in join_table_identifiers]
+366                for ambiguous_col in ambiguous_cols:
+367                    ctes_with_column = [
+368                        cte
+369                        for cte in self.expression.ctes
+370                        if cte.alias_or_name in cte_names_in_join
+371                        and ambiguous_col.alias_or_name in cte.this.named_selects
+372                    ]
+373                    # If the select column does not specify a table and there is a join
+374                    # then we assume they are referring to the left table
+375                    if len(ctes_with_column) > 1:
+376                        table_identifier = self.expression.args["from"].args["expressions"][0].this
+377                    else:
+378                        table_identifier = ctes_with_column[0].args["alias"].this
+379                    ambiguous_col.expression.set("table", table_identifier)
+380        expression = self.expression.select(*[x.expression for x in cols], **kwargs)
+381        qualify_columns(expression, sqlglot.schema)
+382        return self.copy(expression=expression, **kwargs)
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + alias(self, name: str, **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
384    @operation(Operation.NO_OP)
+385    def alias(self, name: str, **kwargs) -> DataFrame:
+386        new_sequence_id = self.spark._random_sequence_id
+387        df = self.copy()
+388        for join_hint in df.pending_join_hints:
+389            for expression in join_hint.expressions:
+390                if expression.alias_or_name == self.sequence_id:
+391                    expression.set("this", Column.ensure_col(new_sequence_id).expression)
+392        df.spark._add_alias_to_mapping(name, new_sequence_id)
+393        return df._convert_leaf_to_cte(sequence_id=new_sequence_id)
+
+ + + + +
+
+ +
+
@operation(Operation.WHERE)
+ + def + where( self, column: Union[sqlglot.dataframe.sql.Column, bool], **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
395    @operation(Operation.WHERE)
+396    def where(self, column: t.Union[Column, bool], **kwargs) -> DataFrame:
+397        col = self._ensure_and_normalize_col(column)
+398        return self.copy(expression=self.expression.where(col.expression))
+
+ + + + +
+
+ +
+
@operation(Operation.WHERE)
+ + def + filter( self, column: Union[sqlglot.dataframe.sql.Column, bool], **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
395    @operation(Operation.WHERE)
+396    def where(self, column: t.Union[Column, bool], **kwargs) -> DataFrame:
+397        col = self._ensure_and_normalize_col(column)
+398        return self.copy(expression=self.expression.where(col.expression))
+
+ + + + +
+
+ +
+
@operation(Operation.GROUP_BY)
+ + def + groupBy(self, *cols, **kwargs) -> sqlglot.dataframe.sql.GroupedData: + + + +
+ +
402    @operation(Operation.GROUP_BY)
+403    def groupBy(self, *cols, **kwargs) -> GroupedData:
+404        columns = self._ensure_and_normalize_cols(cols)
+405        return GroupedData(self, columns, self.last_op)
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + agg(self, *exprs, **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
407    @operation(Operation.SELECT)
+408    def agg(self, *exprs, **kwargs) -> DataFrame:
+409        cols = self._ensure_and_normalize_cols(exprs)
+410        return self.groupBy().agg(*cols)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + join( self, other_df: sqlglot.dataframe.sql.DataFrame, on: Union[str, List[str], sqlglot.dataframe.sql.Column, List[sqlglot.dataframe.sql.Column]], how: str = 'inner', **kwargs) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
412    @operation(Operation.FROM)
+413    def join(
+414        self,
+415        other_df: DataFrame,
+416        on: t.Union[str, t.List[str], Column, t.List[Column]],
+417        how: str = "inner",
+418        **kwargs,
+419    ) -> DataFrame:
+420        other_df = other_df._convert_leaf_to_cte()
+421        pre_join_self_latest_cte_name = self.latest_cte_name
+422        columns = self._ensure_and_normalize_cols(on)
+423        join_type = how.replace("_", " ")
+424        if isinstance(columns[0].expression, exp.Column):
+425            join_columns = [
+426                Column(x).set_table_name(pre_join_self_latest_cte_name) for x in columns
+427            ]
+428            join_clause = functools.reduce(
+429                lambda x, y: x & y,
+430                [
+431                    col.copy().set_table_name(pre_join_self_latest_cte_name)
+432                    == col.copy().set_table_name(other_df.latest_cte_name)
+433                    for col in columns
+434                ],
+435            )
+436        else:
+437            if len(columns) > 1:
+438                columns = [functools.reduce(lambda x, y: x & y, columns)]
+439            join_clause = columns[0]
+440            join_columns = [
+441                Column(x).set_table_name(pre_join_self_latest_cte_name)
+442                if i % 2 == 0
+443                else Column(x).set_table_name(other_df.latest_cte_name)
+444                for i, x in enumerate(join_clause.expression.find_all(exp.Column))
+445            ]
+446        self_columns = [
+447            column.set_table_name(pre_join_self_latest_cte_name, copy=True)
+448            for column in self._get_outer_select_columns(self)
+449        ]
+450        other_columns = [
+451            column.set_table_name(other_df.latest_cte_name, copy=True)
+452            for column in self._get_outer_select_columns(other_df)
+453        ]
+454        column_value_mapping = {
+455            column.alias_or_name
+456            if not isinstance(column.expression.this, exp.Star)
+457            else column.sql(): column
+458            for column in other_columns + self_columns + join_columns
+459        }
+460        all_columns = [
+461            column_value_mapping[name]
+462            for name in {x.alias_or_name: None for x in join_columns + self_columns + other_columns}
+463        ]
+464        new_df = self.copy(
+465            expression=self.expression.join(
+466                other_df.latest_cte_name, on=join_clause.expression, join_type=join_type
+467            )
+468        )
+469        new_df.expression = new_df._add_ctes_to_expression(
+470            new_df.expression, other_df.expression.ctes
+471        )
+472        new_df.pending_hints.extend(other_df.pending_hints)
+473        new_df = new_df.select.__wrapped__(new_df, *all_columns)
+474        return new_df
+
+ + + + +
+
+ +
+
@operation(Operation.ORDER_BY)
+ + def + orderBy( self, *cols: Union[str, sqlglot.dataframe.sql.Column], ascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
476    @operation(Operation.ORDER_BY)
+477    def orderBy(
+478        self,
+479        *cols: t.Union[str, Column],
+480        ascending: t.Optional[t.Union[t.Any, t.List[t.Any]]] = None,
+481    ) -> DataFrame:
+482        """
+483        This implementation lets any ordered columns take priority over whatever is provided in `ascending`. Spark
+484        has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this
+485        is unlikely to come up.
+486        """
+487        columns = self._ensure_and_normalize_cols(cols)
+488        pre_ordered_col_indexes = [
+489            x
+490            for x in [
+491                i if isinstance(col.expression, exp.Ordered) else None
+492                for i, col in enumerate(columns)
+493            ]
+494            if x is not None
+495        ]
+496        if ascending is None:
+497            ascending = [True] * len(columns)
+498        elif not isinstance(ascending, list):
+499            ascending = [ascending] * len(columns)
+500        ascending = [bool(x) for i, x in enumerate(ascending)]
+501        assert len(columns) == len(
+502            ascending
+503        ), "The length of items in ascending must equal the number of columns provided"
+504        col_and_ascending = list(zip(columns, ascending))
+505        order_by_columns = [
+506            exp.Ordered(this=col.expression, desc=not asc)
+507            if i not in pre_ordered_col_indexes
+508            else columns[i].column_expression
+509            for i, (col, asc) in enumerate(col_and_ascending)
+510        ]
+511        return self.copy(expression=self.expression.order_by(*order_by_columns))
+
+ + +

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark +has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this +is unlikely to come up.

+
+ + +
+
+ +
+
@operation(Operation.ORDER_BY)
+ + def + sort( self, *cols: Union[str, sqlglot.dataframe.sql.Column], ascending: Union[Any, List[Any], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
476    @operation(Operation.ORDER_BY)
+477    def orderBy(
+478        self,
+479        *cols: t.Union[str, Column],
+480        ascending: t.Optional[t.Union[t.Any, t.List[t.Any]]] = None,
+481    ) -> DataFrame:
+482        """
+483        This implementation lets any ordered columns take priority over whatever is provided in `ascending`. Spark
+484        has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this
+485        is unlikely to come up.
+486        """
+487        columns = self._ensure_and_normalize_cols(cols)
+488        pre_ordered_col_indexes = [
+489            x
+490            for x in [
+491                i if isinstance(col.expression, exp.Ordered) else None
+492                for i, col in enumerate(columns)
+493            ]
+494            if x is not None
+495        ]
+496        if ascending is None:
+497            ascending = [True] * len(columns)
+498        elif not isinstance(ascending, list):
+499            ascending = [ascending] * len(columns)
+500        ascending = [bool(x) for i, x in enumerate(ascending)]
+501        assert len(columns) == len(
+502            ascending
+503        ), "The length of items in ascending must equal the number of columns provided"
+504        col_and_ascending = list(zip(columns, ascending))
+505        order_by_columns = [
+506            exp.Ordered(this=col.expression, desc=not asc)
+507            if i not in pre_ordered_col_indexes
+508            else columns[i].column_expression
+509            for i, (col, asc) in enumerate(col_and_ascending)
+510        ]
+511        return self.copy(expression=self.expression.order_by(*order_by_columns))
+
+ + +

This implementation lets any ordered columns take priority over whatever is provided in ascending. Spark +has irregular behavior and can result in runtime errors. Users shouldn't be mixing the two anyways so this +is unlikely to come up.

+
+ + +
+
+ +
+
@operation(Operation.FROM)
+ + def + union( self, other: sqlglot.dataframe.sql.DataFrame) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
515    @operation(Operation.FROM)
+516    def union(self, other: DataFrame) -> DataFrame:
+517        return self._set_operation(exp.Union, other, False)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + unionAll( self, other: sqlglot.dataframe.sql.DataFrame) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
515    @operation(Operation.FROM)
+516    def union(self, other: DataFrame) -> DataFrame:
+517        return self._set_operation(exp.Union, other, False)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + unionByName( self, other: sqlglot.dataframe.sql.DataFrame, allowMissingColumns: bool = False): + + + +
+ +
521    @operation(Operation.FROM)
+522    def unionByName(self, other: DataFrame, allowMissingColumns: bool = False):
+523        l_columns = self.columns
+524        r_columns = other.columns
+525        if not allowMissingColumns:
+526            l_expressions = l_columns
+527            r_expressions = l_columns
+528        else:
+529            l_expressions = []
+530            r_expressions = []
+531            r_columns_unused = copy(r_columns)
+532            for l_column in l_columns:
+533                l_expressions.append(l_column)
+534                if l_column in r_columns:
+535                    r_expressions.append(l_column)
+536                    r_columns_unused.remove(l_column)
+537                else:
+538                    r_expressions.append(exp.alias_(exp.Null(), l_column))
+539            for r_column in r_columns_unused:
+540                l_expressions.append(exp.alias_(exp.Null(), r_column))
+541                r_expressions.append(r_column)
+542        r_df = (
+543            other.copy()._convert_leaf_to_cte().select(*self._ensure_list_of_columns(r_expressions))
+544        )
+545        l_df = self.copy()
+546        if allowMissingColumns:
+547            l_df = l_df._convert_leaf_to_cte().select(*self._ensure_list_of_columns(l_expressions))
+548        return l_df._set_operation(exp.Union, r_df, False)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + intersect( self, other: sqlglot.dataframe.sql.DataFrame) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
550    @operation(Operation.FROM)
+551    def intersect(self, other: DataFrame) -> DataFrame:
+552        return self._set_operation(exp.Intersect, other, True)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + intersectAll( self, other: sqlglot.dataframe.sql.DataFrame) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
554    @operation(Operation.FROM)
+555    def intersectAll(self, other: DataFrame) -> DataFrame:
+556        return self._set_operation(exp.Intersect, other, False)
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + exceptAll( self, other: sqlglot.dataframe.sql.DataFrame) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
558    @operation(Operation.FROM)
+559    def exceptAll(self, other: DataFrame) -> DataFrame:
+560        return self._set_operation(exp.Except, other, False)
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + distinct(self) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
562    @operation(Operation.SELECT)
+563    def distinct(self) -> DataFrame:
+564        return self.copy(expression=self.expression.distinct())
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + dropDuplicates(self, subset: Optional[List[str]] = None): + + + +
+ +
566    @operation(Operation.SELECT)
+567    def dropDuplicates(self, subset: t.Optional[t.List[str]] = None):
+568        if not subset:
+569            return self.distinct()
+570        column_names = ensure_list(subset)
+571        window = Window.partitionBy(*column_names).orderBy(*column_names)
+572        return (
+573            self.copy()
+574            .withColumn("row_num", F.row_number().over(window))
+575            .where(F.col("row_num") == F.lit(1))
+576            .drop("row_num")
+577        )
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + dropna( self, how: str = 'any', thresh: Optional[int] = None, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
579    @operation(Operation.FROM)
+580    def dropna(
+581        self,
+582        how: str = "any",
+583        thresh: t.Optional[int] = None,
+584        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+585    ) -> DataFrame:
+586        minimum_non_null = thresh or 0  # will be determined later if thresh is null
+587        new_df = self.copy()
+588        all_columns = self._get_outer_select_columns(new_df.expression)
+589        if subset:
+590            null_check_columns = self._ensure_and_normalize_cols(subset)
+591        else:
+592            null_check_columns = all_columns
+593        if thresh is None:
+594            minimum_num_nulls = 1 if how == "any" else len(null_check_columns)
+595        else:
+596            minimum_num_nulls = len(null_check_columns) - minimum_non_null + 1
+597        if minimum_num_nulls > len(null_check_columns):
+598            raise RuntimeError(
+599                f"The minimum num nulls for dropna must be less than or equal to the number of columns. "
+600                f"Minimum num nulls: {minimum_num_nulls}, Num Columns: {len(null_check_columns)}"
+601            )
+602        if_null_checks = [
+603            F.when(column.isNull(), F.lit(1)).otherwise(F.lit(0)) for column in null_check_columns
+604        ]
+605        nulls_added_together = functools.reduce(lambda x, y: x + y, if_null_checks)
+606        num_nulls = nulls_added_together.alias("num_nulls")
+607        new_df = new_df.select(num_nulls, append=True)
+608        filtered_df = new_df.where(F.col("num_nulls") < F.lit(minimum_num_nulls))
+609        final_df = filtered_df.select(*all_columns)
+610        return final_df
+
+ + + + +
+
+ +
+
@operation(Operation.FROM)
+ + def + fillna( self, value: <MagicMock id='140700331804992'>, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
612    @operation(Operation.FROM)
+613    def fillna(
+614        self,
+615        value: t.Union[ColumnLiterals],
+616        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+617    ) -> DataFrame:
+618        """
+619        Functionality Difference: If you provide a value to replace a null and that type conflicts
+620        with the type of the column then PySpark will just ignore your replacement.
+621        This will try to cast them to be the same in some cases. So they won't always match.
+622        Best to not mix types so make sure replacement is the same type as the column
+623
+624        Possibility for improvement: Use `typeof` function to get the type of the column
+625        and check if it matches the type of the value provided. If not then make it null.
+626        """
+627        from sqlglot.dataframe.sql.functions import lit
+628
+629        values = None
+630        columns = None
+631        new_df = self.copy()
+632        all_columns = self._get_outer_select_columns(new_df.expression)
+633        all_column_mapping = {column.alias_or_name: column for column in all_columns}
+634        if isinstance(value, dict):
+635            values = list(value.values())
+636            columns = self._ensure_and_normalize_cols(list(value))
+637        if not columns:
+638            columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
+639        if not values:
+640            values = [value] * len(columns)
+641        value_columns = [lit(value) for value in values]
+642
+643        null_replacement_mapping = {
+644            column.alias_or_name: (
+645                F.when(column.isNull(), value).otherwise(column).alias(column.alias_or_name)
+646            )
+647            for column, value in zip(columns, value_columns)
+648        }
+649        null_replacement_mapping = {**all_column_mapping, **null_replacement_mapping}
+650        null_replacement_columns = [
+651            null_replacement_mapping[column.alias_or_name] for column in all_columns
+652        ]
+653        new_df = new_df.select(*null_replacement_columns)
+654        return new_df
+
+ + +

Functionality Difference: If you provide a value to replace a null and that type conflicts +with the type of the column then PySpark will just ignore your replacement. +This will try to cast them to be the same in some cases. So they won't always match. +Best to not mix types so make sure replacement is the same type as the column

+ +

Possibility for improvement: Use typeof function to get the type of the column +and check if it matches the type of the value provided. If not then make it null.

+
+ + +
+
+ +
+
@operation(Operation.FROM)
+ + def + replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[Collection[<MagicMock id='140700331990208'>], <MagicMock id='140700331990208'>, NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
656    @operation(Operation.FROM)
+657    def replace(
+658        self,
+659        to_replace: t.Union[bool, int, float, str, t.List, t.Dict],
+660        value: t.Optional[t.Union[bool, int, float, str, t.List]] = None,
+661        subset: t.Optional[t.Collection[ColumnOrName] | ColumnOrName] = None,
+662    ) -> DataFrame:
+663        from sqlglot.dataframe.sql.functions import lit
+664
+665        old_values = None
+666        new_df = self.copy()
+667        all_columns = self._get_outer_select_columns(new_df.expression)
+668        all_column_mapping = {column.alias_or_name: column for column in all_columns}
+669
+670        columns = self._ensure_and_normalize_cols(subset) if subset else all_columns
+671        if isinstance(to_replace, dict):
+672            old_values = list(to_replace)
+673            new_values = list(to_replace.values())
+674        elif not old_values and isinstance(to_replace, list):
+675            assert isinstance(value, list), "value must be a list since the replacements are a list"
+676            assert len(to_replace) == len(
+677                value
+678            ), "the replacements and values must be the same length"
+679            old_values = to_replace
+680            new_values = value
+681        else:
+682            old_values = [to_replace] * len(columns)
+683            new_values = [value] * len(columns)
+684        old_values = [lit(value) for value in old_values]
+685        new_values = [lit(value) for value in new_values]
+686
+687        replacement_mapping = {}
+688        for column in columns:
+689            expression = Column(None)
+690            for i, (old_value, new_value) in enumerate(zip(old_values, new_values)):
+691                if i == 0:
+692                    expression = F.when(column == old_value, new_value)
+693                else:
+694                    expression = expression.when(column == old_value, new_value)  # type: ignore
+695            replacement_mapping[column.alias_or_name] = expression.otherwise(column).alias(
+696                column.expression.alias_or_name
+697            )
+698
+699        replacement_mapping = {**all_column_mapping, **replacement_mapping}
+700        replacement_columns = [replacement_mapping[column.alias_or_name] for column in all_columns]
+701        new_df = new_df.select(*replacement_columns)
+702        return new_df
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + withColumn( self, colName: str, col: sqlglot.dataframe.sql.Column) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
704    @operation(Operation.SELECT)
+705    def withColumn(self, colName: str, col: Column) -> DataFrame:
+706        col = self._ensure_and_normalize_col(col)
+707        existing_col_names = self.expression.named_selects
+708        existing_col_index = (
+709            existing_col_names.index(colName) if colName in existing_col_names else None
+710        )
+711        if existing_col_index:
+712            expression = self.expression.copy()
+713            expression.expressions[existing_col_index] = col.expression
+714            return self.copy(expression=expression)
+715        return self.copy().select(col.alias(colName), append=True)
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + withColumnRenamed(self, existing: str, new: str): + + + +
+ +
717    @operation(Operation.SELECT)
+718    def withColumnRenamed(self, existing: str, new: str):
+719        expression = self.expression.copy()
+720        existing_columns = [
+721            expression
+722            for expression in expression.expressions
+723            if expression.alias_or_name == existing
+724        ]
+725        if not existing_columns:
+726            raise ValueError("Tried to rename a column that doesn't exist")
+727        for existing_column in existing_columns:
+728            if isinstance(existing_column, exp.Column):
+729                existing_column.replace(exp.alias_(existing_column.copy(), new))
+730            else:
+731                existing_column.set("alias", exp.to_identifier(new))
+732        return self.copy(expression=expression)
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + drop( self, *cols: Union[str, sqlglot.dataframe.sql.Column]) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
734    @operation(Operation.SELECT)
+735    def drop(self, *cols: t.Union[str, Column]) -> DataFrame:
+736        all_columns = self._get_outer_select_columns(self.expression)
+737        drop_cols = self._ensure_and_normalize_cols(cols)
+738        new_columns = [
+739            col
+740            for col in all_columns
+741            if col.alias_or_name not in [drop_column.alias_or_name for drop_column in drop_cols]
+742        ]
+743        return self.copy().select(*new_columns, append=False)
+
+ + + + +
+
+ +
+
@operation(Operation.LIMIT)
+ + def + limit(self, num: int) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
745    @operation(Operation.LIMIT)
+746    def limit(self, num: int) -> DataFrame:
+747        return self.copy(expression=self.expression.limit(num))
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + hint( self, name: str, *parameters: Union[str, int, NoneType]) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
749    @operation(Operation.NO_OP)
+750    def hint(self, name: str, *parameters: t.Optional[t.Union[str, int]]) -> DataFrame:
+751        parameter_list = ensure_list(parameters)
+752        parameter_columns = (
+753            self._ensure_list_of_columns(parameter_list)
+754            if parameters
+755            else Column.ensure_cols([self.sequence_id])
+756        )
+757        return self._hint(name, parameter_columns)
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + repartition( self, numPartitions: Union[int, <MagicMock id='140700332136032'>], *cols: <MagicMock id='140700332245248'>) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
759    @operation(Operation.NO_OP)
+760    def repartition(
+761        self, numPartitions: t.Union[int, ColumnOrName], *cols: ColumnOrName
+762    ) -> DataFrame:
+763        num_partition_cols = self._ensure_list_of_columns(numPartitions)
+764        columns = self._ensure_and_normalize_cols(cols)
+765        args = num_partition_cols + columns
+766        return self._hint("repartition", args)
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + coalesce(self, numPartitions: int) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
768    @operation(Operation.NO_OP)
+769    def coalesce(self, numPartitions: int) -> DataFrame:
+770        num_partitions = Column.ensure_cols([numPartitions])
+771        return self._hint("coalesce", num_partitions)
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + cache(self) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
773    @operation(Operation.NO_OP)
+774    def cache(self) -> DataFrame:
+775        return self._cache(storage_level="MEMORY_AND_DISK")
+
+ + + + +
+
+ +
+
@operation(Operation.NO_OP)
+ + def + persist( self, storageLevel: str = 'MEMORY_AND_DISK_SER') -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
777    @operation(Operation.NO_OP)
+778    def persist(self, storageLevel: str = "MEMORY_AND_DISK_SER") -> DataFrame:
+779        """
+780        Storage Level Options: https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-aux-cache-cache-table.html
+781        """
+782        return self._cache(storageLevel)
+
+ + + + + +
+
+
+ +
+ + class + GroupedData: + + + +
+ +
14class GroupedData:
+15    def __init__(self, df: DataFrame, group_by_cols: t.List[Column], last_op: Operation):
+16        self._df = df.copy()
+17        self.spark = df.spark
+18        self.last_op = last_op
+19        self.group_by_cols = group_by_cols
+20
+21    def _get_function_applied_columns(
+22        self, func_name: str, cols: t.Tuple[str, ...]
+23    ) -> t.List[Column]:
+24        func_name = func_name.lower()
+25        return [getattr(F, func_name)(name).alias(f"{func_name}({name})") for name in cols]
+26
+27    @operation(Operation.SELECT)
+28    def agg(self, *exprs: t.Union[Column, t.Dict[str, str]]) -> DataFrame:
+29        columns = (
+30            [Column(f"{agg_func}({column_name})") for column_name, agg_func in exprs[0].items()]
+31            if isinstance(exprs[0], dict)
+32            else exprs
+33        )
+34        cols = self._df._ensure_and_normalize_cols(columns)
+35
+36        expression = self._df.expression.group_by(
+37            *[x.expression for x in self.group_by_cols]
+38        ).select(*[x.expression for x in self.group_by_cols + cols], append=False)
+39        return self._df.copy(expression=expression)
+40
+41    def count(self) -> DataFrame:
+42        return self.agg(F.count("*").alias("count"))
+43
+44    def mean(self, *cols: str) -> DataFrame:
+45        return self.avg(*cols)
+46
+47    def avg(self, *cols: str) -> DataFrame:
+48        return self.agg(*self._get_function_applied_columns("avg", cols))
+49
+50    def max(self, *cols: str) -> DataFrame:
+51        return self.agg(*self._get_function_applied_columns("max", cols))
+52
+53    def min(self, *cols: str) -> DataFrame:
+54        return self.agg(*self._get_function_applied_columns("min", cols))
+55
+56    def sum(self, *cols: str) -> DataFrame:
+57        return self.agg(*self._get_function_applied_columns("sum", cols))
+58
+59    def pivot(self, *cols: str) -> DataFrame:
+60        raise NotImplementedError("Sum distinct is not currently implemented")
+
+ + + + +
+ +
+ + GroupedData( df: sqlglot.dataframe.sql.DataFrame, group_by_cols: List[sqlglot.dataframe.sql.Column], last_op: sqlglot.dataframe.sql.operations.Operation) + + + +
+ +
15    def __init__(self, df: DataFrame, group_by_cols: t.List[Column], last_op: Operation):
+16        self._df = df.copy()
+17        self.spark = df.spark
+18        self.last_op = last_op
+19        self.group_by_cols = group_by_cols
+
+ + + + +
+
+ +
+
@operation(Operation.SELECT)
+ + def + agg( self, *exprs: Union[sqlglot.dataframe.sql.Column, Dict[str, str]]) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
27    @operation(Operation.SELECT)
+28    def agg(self, *exprs: t.Union[Column, t.Dict[str, str]]) -> DataFrame:
+29        columns = (
+30            [Column(f"{agg_func}({column_name})") for column_name, agg_func in exprs[0].items()]
+31            if isinstance(exprs[0], dict)
+32            else exprs
+33        )
+34        cols = self._df._ensure_and_normalize_cols(columns)
+35
+36        expression = self._df.expression.group_by(
+37            *[x.expression for x in self.group_by_cols]
+38        ).select(*[x.expression for x in self.group_by_cols + cols], append=False)
+39        return self._df.copy(expression=expression)
+
+ + + + +
+
+ +
+ + def + count(self) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
41    def count(self) -> DataFrame:
+42        return self.agg(F.count("*").alias("count"))
+
+ + + + +
+
+ +
+ + def + mean(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
44    def mean(self, *cols: str) -> DataFrame:
+45        return self.avg(*cols)
+
+ + + + +
+
+ +
+ + def + avg(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
47    def avg(self, *cols: str) -> DataFrame:
+48        return self.agg(*self._get_function_applied_columns("avg", cols))
+
+ + + + +
+
+ +
+ + def + max(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
50    def max(self, *cols: str) -> DataFrame:
+51        return self.agg(*self._get_function_applied_columns("max", cols))
+
+ + + + +
+
+ +
+ + def + min(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
53    def min(self, *cols: str) -> DataFrame:
+54        return self.agg(*self._get_function_applied_columns("min", cols))
+
+ + + + +
+
+ +
+ + def + sum(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
56    def sum(self, *cols: str) -> DataFrame:
+57        return self.agg(*self._get_function_applied_columns("sum", cols))
+
+ + + + +
+
+ +
+ + def + pivot(self, *cols: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
59    def pivot(self, *cols: str) -> DataFrame:
+60        raise NotImplementedError("Sum distinct is not currently implemented")
+
+ + + + +
+
+
+ +
+ + class + Column: + + + +
+ +
 16class Column:
+ 17    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
+ 18        if isinstance(expression, Column):
+ 19            expression = expression.expression  # type: ignore
+ 20        elif expression is None or not isinstance(expression, (str, exp.Expression)):
+ 21            expression = self._lit(expression).expression  # type: ignore
+ 22
+ 23        expression = sqlglot.maybe_parse(expression, dialect="spark")
+ 24        if expression is None:
+ 25            raise ValueError(f"Could not parse {expression}")
+ 26        self.expression: exp.Expression = expression
+ 27
+ 28    def __repr__(self):
+ 29        return repr(self.expression)
+ 30
+ 31    def __hash__(self):
+ 32        return hash(self.expression)
+ 33
+ 34    def __eq__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
+ 35        return self.binary_op(exp.EQ, other)
+ 36
+ 37    def __ne__(self, other: ColumnOrLiteral) -> Column:  # type: ignore
+ 38        return self.binary_op(exp.NEQ, other)
+ 39
+ 40    def __gt__(self, other: ColumnOrLiteral) -> Column:
+ 41        return self.binary_op(exp.GT, other)
+ 42
+ 43    def __ge__(self, other: ColumnOrLiteral) -> Column:
+ 44        return self.binary_op(exp.GTE, other)
+ 45
+ 46    def __lt__(self, other: ColumnOrLiteral) -> Column:
+ 47        return self.binary_op(exp.LT, other)
+ 48
+ 49    def __le__(self, other: ColumnOrLiteral) -> Column:
+ 50        return self.binary_op(exp.LTE, other)
+ 51
+ 52    def __and__(self, other: ColumnOrLiteral) -> Column:
+ 53        return self.binary_op(exp.And, other)
+ 54
+ 55    def __or__(self, other: ColumnOrLiteral) -> Column:
+ 56        return self.binary_op(exp.Or, other)
+ 57
+ 58    def __mod__(self, other: ColumnOrLiteral) -> Column:
+ 59        return self.binary_op(exp.Mod, other)
+ 60
+ 61    def __add__(self, other: ColumnOrLiteral) -> Column:
+ 62        return self.binary_op(exp.Add, other)
+ 63
+ 64    def __sub__(self, other: ColumnOrLiteral) -> Column:
+ 65        return self.binary_op(exp.Sub, other)
+ 66
+ 67    def __mul__(self, other: ColumnOrLiteral) -> Column:
+ 68        return self.binary_op(exp.Mul, other)
+ 69
+ 70    def __truediv__(self, other: ColumnOrLiteral) -> Column:
+ 71        return self.binary_op(exp.Div, other)
+ 72
+ 73    def __div__(self, other: ColumnOrLiteral) -> Column:
+ 74        return self.binary_op(exp.Div, other)
+ 75
+ 76    def __neg__(self) -> Column:
+ 77        return self.unary_op(exp.Neg)
+ 78
+ 79    def __radd__(self, other: ColumnOrLiteral) -> Column:
+ 80        return self.inverse_binary_op(exp.Add, other)
+ 81
+ 82    def __rsub__(self, other: ColumnOrLiteral) -> Column:
+ 83        return self.inverse_binary_op(exp.Sub, other)
+ 84
+ 85    def __rmul__(self, other: ColumnOrLiteral) -> Column:
+ 86        return self.inverse_binary_op(exp.Mul, other)
+ 87
+ 88    def __rdiv__(self, other: ColumnOrLiteral) -> Column:
+ 89        return self.inverse_binary_op(exp.Div, other)
+ 90
+ 91    def __rtruediv__(self, other: ColumnOrLiteral) -> Column:
+ 92        return self.inverse_binary_op(exp.Div, other)
+ 93
+ 94    def __rmod__(self, other: ColumnOrLiteral) -> Column:
+ 95        return self.inverse_binary_op(exp.Mod, other)
+ 96
+ 97    def __pow__(self, power: ColumnOrLiteral, modulo=None):
+ 98        return Column(exp.Pow(this=self.expression, expression=Column(power).expression))
+ 99
+100    def __rpow__(self, power: ColumnOrLiteral):
+101        return Column(exp.Pow(this=Column(power).expression, expression=self.expression))
+102
+103    def __invert__(self):
+104        return self.unary_op(exp.Not)
+105
+106    def __rand__(self, other: ColumnOrLiteral) -> Column:
+107        return self.inverse_binary_op(exp.And, other)
+108
+109    def __ror__(self, other: ColumnOrLiteral) -> Column:
+110        return self.inverse_binary_op(exp.Or, other)
+111
+112    @classmethod
+113    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
+114        return cls(value)
+115
+116    @classmethod
+117    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
+118        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
+119
+120    @classmethod
+121    def _lit(cls, value: ColumnOrLiteral) -> Column:
+122        if isinstance(value, dict):
+123            columns = [cls._lit(v).alias(k).expression for k, v in value.items()]
+124            return cls(exp.Struct(expressions=columns))
+125        return cls(exp.convert(value))
+126
+127    @classmethod
+128    def invoke_anonymous_function(
+129        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
+130    ) -> Column:
+131        columns = [] if column is None else [cls.ensure_col(column)]
+132        column_args = [cls.ensure_col(arg) for arg in args]
+133        expressions = [x.expression for x in columns + column_args]
+134        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
+135        return Column(new_expression)
+136
+137    @classmethod
+138    def invoke_expression_over_column(
+139        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
+140    ) -> Column:
+141        ensured_column = None if column is None else cls.ensure_col(column)
+142        ensure_expression_values = {
+143            k: [Column.ensure_col(x).expression for x in v]
+144            if is_iterable(v)
+145            else Column.ensure_col(v).expression
+146            for k, v in kwargs.items()
+147        }
+148        new_expression = (
+149            callable_expression(**ensure_expression_values)
+150            if ensured_column is None
+151            else callable_expression(
+152                this=ensured_column.column_expression, **ensure_expression_values
+153            )
+154        )
+155        return Column(new_expression)
+156
+157    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
+158        return Column(
+159            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
+160        )
+161
+162    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
+163        return Column(
+164            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
+165        )
+166
+167    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
+168        return Column(klass(this=self.column_expression, **kwargs))
+169
+170    @property
+171    def is_alias(self):
+172        return isinstance(self.expression, exp.Alias)
+173
+174    @property
+175    def is_column(self):
+176        return isinstance(self.expression, exp.Column)
+177
+178    @property
+179    def column_expression(self) -> exp.Column:
+180        return self.expression.unalias()
+181
+182    @property
+183    def alias_or_name(self) -> str:
+184        return self.expression.alias_or_name
+185
+186    @classmethod
+187    def ensure_literal(cls, value) -> Column:
+188        from sqlglot.dataframe.sql.functions import lit
+189
+190        if isinstance(value, cls):
+191            value = value.expression
+192        if not isinstance(value, exp.Literal):
+193            return lit(value)
+194        return Column(value)
+195
+196    def copy(self) -> Column:
+197        return Column(self.expression.copy())
+198
+199    def set_table_name(self, table_name: str, copy=False) -> Column:
+200        expression = self.expression.copy() if copy else self.expression
+201        expression.set("table", exp.to_identifier(table_name))
+202        return Column(expression)
+203
+204    def sql(self, **kwargs) -> str:
+205        return self.expression.sql(**{"dialect": "spark", **kwargs})
+206
+207    def alias(self, name: str) -> Column:
+208        new_expression = exp.alias_(self.column_expression, name)
+209        return Column(new_expression)
+210
+211    def asc(self) -> Column:
+212        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
+213        return Column(new_expression)
+214
+215    def desc(self) -> Column:
+216        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
+217        return Column(new_expression)
+218
+219    asc_nulls_first = asc
+220
+221    def asc_nulls_last(self) -> Column:
+222        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
+223        return Column(new_expression)
+224
+225    def desc_nulls_first(self) -> Column:
+226        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
+227        return Column(new_expression)
+228
+229    desc_nulls_last = desc
+230
+231    def when(self, condition: Column, value: t.Any) -> Column:
+232        from sqlglot.dataframe.sql.functions import when
+233
+234        column_with_if = when(condition, value)
+235        if not isinstance(self.expression, exp.Case):
+236            return column_with_if
+237        new_column = self.copy()
+238        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
+239        return new_column
+240
+241    def otherwise(self, value: t.Any) -> Column:
+242        from sqlglot.dataframe.sql.functions import lit
+243
+244        true_value = value if isinstance(value, Column) else lit(value)
+245        new_column = self.copy()
+246        new_column.expression.set("default", true_value.column_expression)
+247        return new_column
+248
+249    def isNull(self) -> Column:
+250        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
+251        return Column(new_expression)
+252
+253    def isNotNull(self) -> Column:
+254        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
+255        return Column(new_expression)
+256
+257    def cast(self, dataType: t.Union[str, DataType]):
+258        """
+259        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
+260        Sqlglot doesn't currently replicate this class so it only accepts a string
+261        """
+262        if isinstance(dataType, DataType):
+263            dataType = dataType.simpleString()
+264        return Column(exp.cast(self.column_expression, dataType, dialect="spark"))
+265
+266    def startswith(self, value: t.Union[str, Column]) -> Column:
+267        value = self._lit(value) if not isinstance(value, Column) else value
+268        return self.invoke_anonymous_function(self, "STARTSWITH", value)
+269
+270    def endswith(self, value: t.Union[str, Column]) -> Column:
+271        value = self._lit(value) if not isinstance(value, Column) else value
+272        return self.invoke_anonymous_function(self, "ENDSWITH", value)
+273
+274    def rlike(self, regexp: str) -> Column:
+275        return self.invoke_expression_over_column(
+276            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
+277        )
+278
+279    def like(self, other: str):
+280        return self.invoke_expression_over_column(
+281            self, exp.Like, expression=self._lit(other).expression
+282        )
+283
+284    def ilike(self, other: str):
+285        return self.invoke_expression_over_column(
+286            self, exp.ILike, expression=self._lit(other).expression
+287        )
+288
+289    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
+290        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
+291        length = self._lit(length) if not isinstance(length, Column) else length
+292        return Column.invoke_expression_over_column(
+293            self, exp.Substring, start=startPos.expression, length=length.expression
+294        )
+295
+296    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
+297        columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+298        expressions = [self._lit(x).expression for x in columns]
+299        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore
+300
+301    def between(
+302        self,
+303        lowerBound: t.Union[ColumnOrLiteral],
+304        upperBound: t.Union[ColumnOrLiteral],
+305    ) -> Column:
+306        lower_bound_exp = (
+307            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
+308        )
+309        upper_bound_exp = (
+310            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
+311        )
+312        return Column(
+313            exp.Between(
+314                this=self.column_expression,
+315                low=lower_bound_exp.expression,
+316                high=upper_bound_exp.expression,
+317            )
+318        )
+319
+320    def over(self, window: WindowSpec) -> Column:
+321        window_expression = window.expression.copy()
+322        window_expression.set("this", self.column_expression)
+323        return Column(window_expression)
+
+ + + + +
+ +
+ + Column( expression: Union[<MagicMock id='140700332259696'>, sqlglot.expressions.Expression, NoneType]) + + + +
+ +
17    def __init__(self, expression: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
+18        if isinstance(expression, Column):
+19            expression = expression.expression  # type: ignore
+20        elif expression is None or not isinstance(expression, (str, exp.Expression)):
+21            expression = self._lit(expression).expression  # type: ignore
+22
+23        expression = sqlglot.maybe_parse(expression, dialect="spark")
+24        if expression is None:
+25            raise ValueError(f"Could not parse {expression}")
+26        self.expression: exp.Expression = expression
+
+ + + + +
+
+ +
+
@classmethod
+ + def + ensure_col( cls, value: Union[<MagicMock id='140700330611696'>, sqlglot.expressions.Expression, NoneType]): + + + +
+ +
112    @classmethod
+113    def ensure_col(cls, value: t.Optional[t.Union[ColumnOrLiteral, exp.Expression]]):
+114        return cls(value)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + ensure_cols( cls, args: List[Union[<MagicMock id='140700330840736'>, sqlglot.expressions.Expression]]) -> List[sqlglot.dataframe.sql.Column]: + + + +
+ +
116    @classmethod
+117    def ensure_cols(cls, args: t.List[t.Union[ColumnOrLiteral, exp.Expression]]) -> t.List[Column]:
+118        return [cls.ensure_col(x) if not isinstance(x, Column) else x for x in args]
+
+ + + + +
+
+ +
+
@classmethod
+ + def + invoke_anonymous_function( cls, column: Optional[<MagicMock id='140700330924096'>], func_name: str, *args: Optional[<MagicMock id='140700330964112'>]) -> sqlglot.dataframe.sql.Column: + + + +
+ +
127    @classmethod
+128    def invoke_anonymous_function(
+129        cls, column: t.Optional[ColumnOrLiteral], func_name: str, *args: t.Optional[ColumnOrLiteral]
+130    ) -> Column:
+131        columns = [] if column is None else [cls.ensure_col(column)]
+132        column_args = [cls.ensure_col(arg) for arg in args]
+133        expressions = [x.expression for x in columns + column_args]
+134        new_expression = exp.Anonymous(this=func_name.upper(), expressions=expressions)
+135        return Column(new_expression)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + invoke_expression_over_column( cls, column: Optional[<MagicMock id='140700331029648'>], callable_expression: Callable, **kwargs) -> sqlglot.dataframe.sql.Column: + + + +
+ +
137    @classmethod
+138    def invoke_expression_over_column(
+139        cls, column: t.Optional[ColumnOrLiteral], callable_expression: t.Callable, **kwargs
+140    ) -> Column:
+141        ensured_column = None if column is None else cls.ensure_col(column)
+142        ensure_expression_values = {
+143            k: [Column.ensure_col(x).expression for x in v]
+144            if is_iterable(v)
+145            else Column.ensure_col(v).expression
+146            for k, v in kwargs.items()
+147        }
+148        new_expression = (
+149            callable_expression(**ensure_expression_values)
+150            if ensured_column is None
+151            else callable_expression(
+152                this=ensured_column.column_expression, **ensure_expression_values
+153            )
+154        )
+155        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + binary_op( self, klass: Callable, other: <MagicMock id='140700331083136'>, **kwargs) -> sqlglot.dataframe.sql.Column: + + + +
+ +
157    def binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
+158        return Column(
+159            klass(this=self.column_expression, expression=Column(other).column_expression, **kwargs)
+160        )
+
+ + + + +
+
+ +
+ + def + inverse_binary_op( self, klass: Callable, other: <MagicMock id='140700331093216'>, **kwargs) -> sqlglot.dataframe.sql.Column: + + + +
+ +
162    def inverse_binary_op(self, klass: t.Callable, other: ColumnOrLiteral, **kwargs) -> Column:
+163        return Column(
+164            klass(this=Column(other).column_expression, expression=self.column_expression, **kwargs)
+165        )
+
+ + + + +
+
+ +
+ + def + unary_op(self, klass: Callable, **kwargs) -> sqlglot.dataframe.sql.Column: + + + +
+ +
167    def unary_op(self, klass: t.Callable, **kwargs) -> Column:
+168        return Column(klass(this=self.column_expression, **kwargs))
+
+ + + + +
+
+ +
+
@classmethod
+ + def + ensure_literal(cls, value) -> sqlglot.dataframe.sql.Column: + + + +
+ +
186    @classmethod
+187    def ensure_literal(cls, value) -> Column:
+188        from sqlglot.dataframe.sql.functions import lit
+189
+190        if isinstance(value, cls):
+191            value = value.expression
+192        if not isinstance(value, exp.Literal):
+193            return lit(value)
+194        return Column(value)
+
+ + + + +
+
+ +
+ + def + copy(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
196    def copy(self) -> Column:
+197        return Column(self.expression.copy())
+
+ + + + +
+
+ +
+ + def + set_table_name(self, table_name: str, copy=False) -> sqlglot.dataframe.sql.Column: + + + +
+ +
199    def set_table_name(self, table_name: str, copy=False) -> Column:
+200        expression = self.expression.copy() if copy else self.expression
+201        expression.set("table", exp.to_identifier(table_name))
+202        return Column(expression)
+
+ + + + +
+
+ +
+ + def + sql(self, **kwargs) -> str: + + + +
+ +
204    def sql(self, **kwargs) -> str:
+205        return self.expression.sql(**{"dialect": "spark", **kwargs})
+
+ + + + +
+
+ +
+ + def + alias(self, name: str) -> sqlglot.dataframe.sql.Column: + + + +
+ +
207    def alias(self, name: str) -> Column:
+208        new_expression = exp.alias_(self.column_expression, name)
+209        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + asc(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
211    def asc(self) -> Column:
+212        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
+213        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + desc(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
215    def desc(self) -> Column:
+216        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
+217        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + asc_nulls_first(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
211    def asc(self) -> Column:
+212        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=True)
+213        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + asc_nulls_last(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
221    def asc_nulls_last(self) -> Column:
+222        new_expression = exp.Ordered(this=self.column_expression, desc=False, nulls_first=False)
+223        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + desc_nulls_first(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
225    def desc_nulls_first(self) -> Column:
+226        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=True)
+227        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + desc_nulls_last(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
215    def desc(self) -> Column:
+216        new_expression = exp.Ordered(this=self.column_expression, desc=True, nulls_first=False)
+217        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + when( self, condition: sqlglot.dataframe.sql.Column, value: Any) -> sqlglot.dataframe.sql.Column: + + + +
+ +
231    def when(self, condition: Column, value: t.Any) -> Column:
+232        from sqlglot.dataframe.sql.functions import when
+233
+234        column_with_if = when(condition, value)
+235        if not isinstance(self.expression, exp.Case):
+236            return column_with_if
+237        new_column = self.copy()
+238        new_column.expression.args["ifs"].extend(column_with_if.expression.args["ifs"])
+239        return new_column
+
+ + + + +
+
+ +
+ + def + otherwise(self, value: Any) -> sqlglot.dataframe.sql.Column: + + + +
+ +
241    def otherwise(self, value: t.Any) -> Column:
+242        from sqlglot.dataframe.sql.functions import lit
+243
+244        true_value = value if isinstance(value, Column) else lit(value)
+245        new_column = self.copy()
+246        new_column.expression.set("default", true_value.column_expression)
+247        return new_column
+
+ + + + +
+
+ +
+ + def + isNull(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
249    def isNull(self) -> Column:
+250        new_expression = exp.Is(this=self.column_expression, expression=exp.Null())
+251        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + isNotNull(self) -> sqlglot.dataframe.sql.Column: + + + +
+ +
253    def isNotNull(self) -> Column:
+254        new_expression = exp.Not(this=exp.Is(this=self.column_expression, expression=exp.Null()))
+255        return Column(new_expression)
+
+ + + + +
+
+ +
+ + def + cast(self, dataType: Union[str, sqlglot.dataframe.sql.types.DataType]): + + + +
+ +
257    def cast(self, dataType: t.Union[str, DataType]):
+258        """
+259        Functionality Difference: PySpark cast accepts a datatype instance of the datatype class
+260        Sqlglot doesn't currently replicate this class so it only accepts a string
+261        """
+262        if isinstance(dataType, DataType):
+263            dataType = dataType.simpleString()
+264        return Column(exp.cast(self.column_expression, dataType, dialect="spark"))
+
+ + +

Functionality Difference: PySpark cast accepts a datatype instance of the datatype class +Sqlglot doesn't currently replicate this class so it only accepts a string

+
+ + +
+
+ +
+ + def + startswith( self, value: Union[str, sqlglot.dataframe.sql.Column]) -> sqlglot.dataframe.sql.Column: + + + +
+ +
266    def startswith(self, value: t.Union[str, Column]) -> Column:
+267        value = self._lit(value) if not isinstance(value, Column) else value
+268        return self.invoke_anonymous_function(self, "STARTSWITH", value)
+
+ + + + +
+
+ +
+ + def + endswith( self, value: Union[str, sqlglot.dataframe.sql.Column]) -> sqlglot.dataframe.sql.Column: + + + +
+ +
270    def endswith(self, value: t.Union[str, Column]) -> Column:
+271        value = self._lit(value) if not isinstance(value, Column) else value
+272        return self.invoke_anonymous_function(self, "ENDSWITH", value)
+
+ + + + +
+
+ +
+ + def + rlike(self, regexp: str) -> sqlglot.dataframe.sql.Column: + + + +
+ +
274    def rlike(self, regexp: str) -> Column:
+275        return self.invoke_expression_over_column(
+276            column=self, callable_expression=exp.RegexpLike, expression=self._lit(regexp).expression
+277        )
+
+ + + + +
+
+ +
+ + def + like(self, other: str): + + + +
+ +
279    def like(self, other: str):
+280        return self.invoke_expression_over_column(
+281            self, exp.Like, expression=self._lit(other).expression
+282        )
+
+ + + + +
+
+ +
+ + def + ilike(self, other: str): + + + +
+ +
284    def ilike(self, other: str):
+285        return self.invoke_expression_over_column(
+286            self, exp.ILike, expression=self._lit(other).expression
+287        )
+
+ + + + +
+
+ +
+ + def + substr( self, startPos: Union[int, sqlglot.dataframe.sql.Column], length: Union[int, sqlglot.dataframe.sql.Column]) -> sqlglot.dataframe.sql.Column: + + + +
+ +
289    def substr(self, startPos: t.Union[int, Column], length: t.Union[int, Column]) -> Column:
+290        startPos = self._lit(startPos) if not isinstance(startPos, Column) else startPos
+291        length = self._lit(length) if not isinstance(length, Column) else length
+292        return Column.invoke_expression_over_column(
+293            self, exp.Substring, start=startPos.expression, length=length.expression
+294        )
+
+ + + + +
+
+ +
+ + def + isin( self, *cols: Union[<MagicMock id='140700331213104'>, Iterable[<MagicMock id='140700331213104'>]]): + + + +
+ +
296    def isin(self, *cols: t.Union[ColumnOrLiteral, t.Iterable[ColumnOrLiteral]]):
+297        columns = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+298        expressions = [self._lit(x).expression for x in columns]
+299        return Column.invoke_expression_over_column(self, exp.In, expressions=expressions)  # type: ignore
+
+ + + + +
+
+ +
+ + def + between( self, lowerBound: <MagicMock id='140700331299440'>, upperBound: <MagicMock id='140700329240384'>) -> sqlglot.dataframe.sql.Column: + + + +
+ +
301    def between(
+302        self,
+303        lowerBound: t.Union[ColumnOrLiteral],
+304        upperBound: t.Union[ColumnOrLiteral],
+305    ) -> Column:
+306        lower_bound_exp = (
+307            self._lit(lowerBound) if not isinstance(lowerBound, Column) else lowerBound
+308        )
+309        upper_bound_exp = (
+310            self._lit(upperBound) if not isinstance(upperBound, Column) else upperBound
+311        )
+312        return Column(
+313            exp.Between(
+314                this=self.column_expression,
+315                low=lower_bound_exp.expression,
+316                high=upper_bound_exp.expression,
+317            )
+318        )
+
+ + + + +
+
+ +
+ + def + over( self, window: <MagicMock id='140700329314480'>) -> sqlglot.dataframe.sql.Column: + + + +
+ +
320    def over(self, window: WindowSpec) -> Column:
+321        window_expression = window.expression.copy()
+322        window_expression.set("this", self.column_expression)
+323        return Column(window_expression)
+
+ + + + +
+
+
+ +
+ + class + DataFrameNaFunctions: + + + +
+ +
785class DataFrameNaFunctions:
+786    def __init__(self, df: DataFrame):
+787        self.df = df
+788
+789    def drop(
+790        self,
+791        how: str = "any",
+792        thresh: t.Optional[int] = None,
+793        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+794    ) -> DataFrame:
+795        return self.df.dropna(how=how, thresh=thresh, subset=subset)
+796
+797    def fill(
+798        self,
+799        value: t.Union[int, bool, float, str, t.Dict[str, t.Any]],
+800        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+801    ) -> DataFrame:
+802        return self.df.fillna(value=value, subset=subset)
+803
+804    def replace(
+805        self,
+806        to_replace: t.Union[bool, int, float, str, t.List, t.Dict],
+807        value: t.Optional[t.Union[bool, int, float, str, t.List]] = None,
+808        subset: t.Optional[t.Union[str, t.List[str]]] = None,
+809    ) -> DataFrame:
+810        return self.df.replace(to_replace=to_replace, value=value, subset=subset)
+
+ + + + +
+ +
+ + DataFrameNaFunctions(df: sqlglot.dataframe.sql.DataFrame) + + + +
+ +
786    def __init__(self, df: DataFrame):
+787        self.df = df
+
+ + + + +
+
+ +
+ + def + drop( self, how: str = 'any', thresh: Optional[int] = None, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
789    def drop(
+790        self,
+791        how: str = "any",
+792        thresh: t.Optional[int] = None,
+793        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+794    ) -> DataFrame:
+795        return self.df.dropna(how=how, thresh=thresh, subset=subset)
+
+ + + + +
+
+ +
+ + def + fill( self, value: Union[int, bool, float, str, Dict[str, Any]], subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
797    def fill(
+798        self,
+799        value: t.Union[int, bool, float, str, t.Dict[str, t.Any]],
+800        subset: t.Optional[t.Union[str, t.Tuple[str, ...], t.List[str]]] = None,
+801    ) -> DataFrame:
+802        return self.df.fillna(value=value, subset=subset)
+
+ + + + +
+
+ +
+ + def + replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[str, List[str], NoneType] = None) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
804    def replace(
+805        self,
+806        to_replace: t.Union[bool, int, float, str, t.List, t.Dict],
+807        value: t.Optional[t.Union[bool, int, float, str, t.List]] = None,
+808        subset: t.Optional[t.Union[str, t.List[str]]] = None,
+809    ) -> DataFrame:
+810        return self.df.replace(to_replace=to_replace, value=value, subset=subset)
+
+ + + + +
+
+
+ +
+ + class + Window: + + + +
+ +
15class Window:
+16    _JAVA_MIN_LONG = -(1 << 63)  # -9223372036854775808
+17    _JAVA_MAX_LONG = (1 << 63) - 1  # 9223372036854775807
+18    _PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
+19    _FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)
+20
+21    unboundedPreceding: int = _JAVA_MIN_LONG
+22
+23    unboundedFollowing: int = _JAVA_MAX_LONG
+24
+25    currentRow: int = 0
+26
+27    @classmethod
+28    def partitionBy(cls, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+29        return WindowSpec().partitionBy(*cols)
+30
+31    @classmethod
+32    def orderBy(cls, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+33        return WindowSpec().orderBy(*cols)
+34
+35    @classmethod
+36    def rowsBetween(cls, start: int, end: int) -> WindowSpec:
+37        return WindowSpec().rowsBetween(start, end)
+38
+39    @classmethod
+40    def rangeBetween(cls, start: int, end: int) -> WindowSpec:
+41        return WindowSpec().rangeBetween(start, end)
+
+ + + + +
+
+ + Window() + + +
+ + + + +
+
+ +
+
@classmethod
+ + def + partitionBy( cls, *cols: Union[<MagicMock id='140700329626592'>, List[<MagicMock id='140700329626592'>]]) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
27    @classmethod
+28    def partitionBy(cls, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+29        return WindowSpec().partitionBy(*cols)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + orderBy( cls, *cols: Union[<MagicMock id='140700329828768'>, List[<MagicMock id='140700329828768'>]]) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
31    @classmethod
+32    def orderBy(cls, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+33        return WindowSpec().orderBy(*cols)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + rowsBetween(cls, start: int, end: int) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
35    @classmethod
+36    def rowsBetween(cls, start: int, end: int) -> WindowSpec:
+37        return WindowSpec().rowsBetween(start, end)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + rangeBetween(cls, start: int, end: int) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
39    @classmethod
+40    def rangeBetween(cls, start: int, end: int) -> WindowSpec:
+41        return WindowSpec().rangeBetween(start, end)
+
+ + + + +
+
+
+ +
+ + class + WindowSpec: + + + +
+ +
 44class WindowSpec:
+ 45    def __init__(self, expression: exp.Expression = exp.Window()):
+ 46        self.expression = expression
+ 47
+ 48    def copy(self):
+ 49        return WindowSpec(self.expression.copy())
+ 50
+ 51    def sql(self, **kwargs) -> str:
+ 52        return self.expression.sql(dialect="spark", **kwargs)
+ 53
+ 54    def partitionBy(self, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+ 55        from sqlglot.dataframe.sql.column import Column
+ 56
+ 57        cols = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+ 58        expressions = [Column.ensure_col(x).expression for x in cols]
+ 59        window_spec = self.copy()
+ 60        partition_by_expressions = window_spec.expression.args.get("partition_by", [])
+ 61        partition_by_expressions.extend(expressions)
+ 62        window_spec.expression.set("partition_by", partition_by_expressions)
+ 63        return window_spec
+ 64
+ 65    def orderBy(self, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+ 66        from sqlglot.dataframe.sql.column import Column
+ 67
+ 68        cols = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+ 69        expressions = [Column.ensure_col(x).expression for x in cols]
+ 70        window_spec = self.copy()
+ 71        if window_spec.expression.args.get("order") is None:
+ 72            window_spec.expression.set("order", exp.Order(expressions=[]))
+ 73        order_by = window_spec.expression.args["order"].expressions
+ 74        order_by.extend(expressions)
+ 75        window_spec.expression.args["order"].set("expressions", order_by)
+ 76        return window_spec
+ 77
+ 78    def _calc_start_end(
+ 79        self, start: int, end: int
+ 80    ) -> t.Dict[str, t.Optional[t.Union[str, exp.Expression]]]:
+ 81        kwargs: t.Dict[str, t.Optional[t.Union[str, exp.Expression]]] = {
+ 82            "start_side": None,
+ 83            "end_side": None,
+ 84        }
+ 85        if start == Window.currentRow:
+ 86            kwargs["start"] = "CURRENT ROW"
+ 87        else:
+ 88            kwargs = {
+ 89                **kwargs,
+ 90                **{
+ 91                    "start_side": "PRECEDING",
+ 92                    "start": "UNBOUNDED"
+ 93                    if start <= Window.unboundedPreceding
+ 94                    else F.lit(start).expression,
+ 95                },
+ 96            }
+ 97        if end == Window.currentRow:
+ 98            kwargs["end"] = "CURRENT ROW"
+ 99        else:
+100            kwargs = {
+101                **kwargs,
+102                **{
+103                    "end_side": "FOLLOWING",
+104                    "end": "UNBOUNDED"
+105                    if end >= Window.unboundedFollowing
+106                    else F.lit(end).expression,
+107                },
+108            }
+109        return kwargs
+110
+111    def rowsBetween(self, start: int, end: int) -> WindowSpec:
+112        window_spec = self.copy()
+113        spec = self._calc_start_end(start, end)
+114        spec["kind"] = "ROWS"
+115        window_spec.expression.set(
+116            "spec",
+117            exp.WindowSpec(
+118                **{**window_spec.expression.args.get("spec", exp.WindowSpec()).args, **spec}
+119            ),
+120        )
+121        return window_spec
+122
+123    def rangeBetween(self, start: int, end: int) -> WindowSpec:
+124        window_spec = self.copy()
+125        spec = self._calc_start_end(start, end)
+126        spec["kind"] = "RANGE"
+127        window_spec.expression.set(
+128            "spec",
+129            exp.WindowSpec(
+130                **{**window_spec.expression.args.get("spec", exp.WindowSpec()).args, **spec}
+131            ),
+132        )
+133        return window_spec
+
+ + + + +
+ +
+ + WindowSpec(expression: sqlglot.expressions.Expression = (WINDOW )) + + + +
+ +
45    def __init__(self, expression: exp.Expression = exp.Window()):
+46        self.expression = expression
+
+ + + + +
+
+ +
+ + def + copy(self): + + + +
+ +
48    def copy(self):
+49        return WindowSpec(self.expression.copy())
+
+ + + + +
+
+ +
+ + def + sql(self, **kwargs) -> str: + + + +
+ +
51    def sql(self, **kwargs) -> str:
+52        return self.expression.sql(dialect="spark", **kwargs)
+
+ + + + +
+
+ +
+ + def + partitionBy( self, *cols: Union[<MagicMock id='140700329685440'>, List[<MagicMock id='140700329685440'>]]) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
54    def partitionBy(self, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+55        from sqlglot.dataframe.sql.column import Column
+56
+57        cols = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+58        expressions = [Column.ensure_col(x).expression for x in cols]
+59        window_spec = self.copy()
+60        partition_by_expressions = window_spec.expression.args.get("partition_by", [])
+61        partition_by_expressions.extend(expressions)
+62        window_spec.expression.set("partition_by", partition_by_expressions)
+63        return window_spec
+
+ + + + +
+
+ +
+ + def + orderBy( self, *cols: Union[<MagicMock id='140700329654400'>, List[<MagicMock id='140700329654400'>]]) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
65    def orderBy(self, *cols: t.Union[ColumnOrName, t.List[ColumnOrName]]) -> WindowSpec:
+66        from sqlglot.dataframe.sql.column import Column
+67
+68        cols = flatten(cols) if isinstance(cols[0], (list, set, tuple)) else cols  # type: ignore
+69        expressions = [Column.ensure_col(x).expression for x in cols]
+70        window_spec = self.copy()
+71        if window_spec.expression.args.get("order") is None:
+72            window_spec.expression.set("order", exp.Order(expressions=[]))
+73        order_by = window_spec.expression.args["order"].expressions
+74        order_by.extend(expressions)
+75        window_spec.expression.args["order"].set("expressions", order_by)
+76        return window_spec
+
+ + + + +
+
+ +
+ + def + rowsBetween(self, start: int, end: int) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
111    def rowsBetween(self, start: int, end: int) -> WindowSpec:
+112        window_spec = self.copy()
+113        spec = self._calc_start_end(start, end)
+114        spec["kind"] = "ROWS"
+115        window_spec.expression.set(
+116            "spec",
+117            exp.WindowSpec(
+118                **{**window_spec.expression.args.get("spec", exp.WindowSpec()).args, **spec}
+119            ),
+120        )
+121        return window_spec
+
+ + + + +
+
+ +
+ + def + rangeBetween(self, start: int, end: int) -> sqlglot.dataframe.sql.WindowSpec: + + + +
+ +
123    def rangeBetween(self, start: int, end: int) -> WindowSpec:
+124        window_spec = self.copy()
+125        spec = self._calc_start_end(start, end)
+126        spec["kind"] = "RANGE"
+127        window_spec.expression.set(
+128            "spec",
+129            exp.WindowSpec(
+130                **{**window_spec.expression.args.get("spec", exp.WindowSpec()).args, **spec}
+131            ),
+132        )
+133        return window_spec
+
+ + + + +
+
+
+ +
+ + class + DataFrameReader: + + + +
+ +
15class DataFrameReader:
+16    def __init__(self, spark: SparkSession):
+17        self.spark = spark
+18
+19    def table(self, tableName: str) -> DataFrame:
+20        from sqlglot.dataframe.sql.dataframe import DataFrame
+21
+22        sqlglot.schema.add_table(tableName)
+23        return DataFrame(
+24            self.spark,
+25            exp.Select().from_(tableName).select(*sqlglot.schema.column_names(tableName)),
+26        )
+
+ + + + +
+ +
+ + DataFrameReader(spark: sqlglot.dataframe.sql.SparkSession) + + + +
+ +
16    def __init__(self, spark: SparkSession):
+17        self.spark = spark
+
+ + + + +
+
+ +
+ + def + table(self, tableName: str) -> sqlglot.dataframe.sql.DataFrame: + + + +
+ +
19    def table(self, tableName: str) -> DataFrame:
+20        from sqlglot.dataframe.sql.dataframe import DataFrame
+21
+22        sqlglot.schema.add_table(tableName)
+23        return DataFrame(
+24            self.spark,
+25            exp.Select().from_(tableName).select(*sqlglot.schema.column_names(tableName)),
+26        )
+
+ + + + +
+
+
+ +
+ + class + DataFrameWriter: + + + +
+ +
29class DataFrameWriter:
+30    def __init__(
+31        self,
+32        df: DataFrame,
+33        spark: t.Optional[SparkSession] = None,
+34        mode: t.Optional[str] = None,
+35        by_name: bool = False,
+36    ):
+37        self._df = df
+38        self._spark = spark or df.spark
+39        self._mode = mode
+40        self._by_name = by_name
+41
+42    def copy(self, **kwargs) -> DataFrameWriter:
+43        return DataFrameWriter(
+44            **{
+45                k[1:] if k.startswith("_") else k: v
+46                for k, v in object_to_dict(self, **kwargs).items()
+47            }
+48        )
+49
+50    def sql(self, **kwargs) -> t.List[str]:
+51        return self._df.sql(**kwargs)
+52
+53    def mode(self, saveMode: t.Optional[str]) -> DataFrameWriter:
+54        return self.copy(_mode=saveMode)
+55
+56    @property
+57    def byName(self):
+58        return self.copy(by_name=True)
+59
+60    def insertInto(self, tableName: str, overwrite: t.Optional[bool] = None) -> DataFrameWriter:
+61        output_expression_container = exp.Insert(
+62            **{
+63                "this": exp.to_table(tableName),
+64                "overwrite": overwrite,
+65            }
+66        )
+67        df = self._df.copy(output_expression_container=output_expression_container)
+68        if self._by_name:
+69            columns = sqlglot.schema.column_names(tableName, only_visible=True)
+70            df = df._convert_leaf_to_cte().select(*columns)
+71
+72        return self.copy(_df=df)
+73
+74    def saveAsTable(self, name: str, format: t.Optional[str] = None, mode: t.Optional[str] = None):
+75        if format is not None:
+76            raise NotImplementedError("Providing Format in the save as table is not supported")
+77        exists, replace, mode = None, None, mode or str(self._mode)
+78        if mode == "append":
+79            return self.insertInto(name)
+80        if mode == "ignore":
+81            exists = True
+82        if mode == "overwrite":
+83            replace = True
+84        output_expression_container = exp.Create(
+85            this=exp.to_table(name),
+86            kind="TABLE",
+87            exists=exists,
+88            replace=replace,
+89        )
+90        return self.copy(_df=self._df.copy(output_expression_container=output_expression_container))
+
+ + + + +
+ +
+ + DataFrameWriter( df: sqlglot.dataframe.sql.DataFrame, spark: Optional[sqlglot.dataframe.sql.SparkSession] = None, mode: Optional[str] = None, by_name: bool = False) + + + +
+ +
30    def __init__(
+31        self,
+32        df: DataFrame,
+33        spark: t.Optional[SparkSession] = None,
+34        mode: t.Optional[str] = None,
+35        by_name: bool = False,
+36    ):
+37        self._df = df
+38        self._spark = spark or df.spark
+39        self._mode = mode
+40        self._by_name = by_name
+
+ + + + +
+
+ +
+ + def + copy(self, **kwargs) -> sqlglot.dataframe.sql.DataFrameWriter: + + + +
+ +
42    def copy(self, **kwargs) -> DataFrameWriter:
+43        return DataFrameWriter(
+44            **{
+45                k[1:] if k.startswith("_") else k: v
+46                for k, v in object_to_dict(self, **kwargs).items()
+47            }
+48        )
+
+ + + + +
+
+ +
+ + def + sql(self, **kwargs) -> List[str]: + + + +
+ +
50    def sql(self, **kwargs) -> t.List[str]:
+51        return self._df.sql(**kwargs)
+
+ + + + +
+
+ +
+ + def + mode( self, saveMode: Optional[str]) -> sqlglot.dataframe.sql.DataFrameWriter: + + + +
+ +
53    def mode(self, saveMode: t.Optional[str]) -> DataFrameWriter:
+54        return self.copy(_mode=saveMode)
+
+ + + + +
+
+ +
+ + def + insertInto( self, tableName: str, overwrite: Optional[bool] = None) -> sqlglot.dataframe.sql.DataFrameWriter: + + + +
+ +
60    def insertInto(self, tableName: str, overwrite: t.Optional[bool] = None) -> DataFrameWriter:
+61        output_expression_container = exp.Insert(
+62            **{
+63                "this": exp.to_table(tableName),
+64                "overwrite": overwrite,
+65            }
+66        )
+67        df = self._df.copy(output_expression_container=output_expression_container)
+68        if self._by_name:
+69            columns = sqlglot.schema.column_names(tableName, only_visible=True)
+70            df = df._convert_leaf_to_cte().select(*columns)
+71
+72        return self.copy(_df=df)
+
+ + + + +
+
+ +
+ + def + saveAsTable( self, name: str, format: Optional[str] = None, mode: Optional[str] = None): + + + +
+ +
74    def saveAsTable(self, name: str, format: t.Optional[str] = None, mode: t.Optional[str] = None):
+75        if format is not None:
+76            raise NotImplementedError("Providing Format in the save as table is not supported")
+77        exists, replace, mode = None, None, mode or str(self._mode)
+78        if mode == "append":
+79            return self.insertInto(name)
+80        if mode == "ignore":
+81            exists = True
+82        if mode == "overwrite":
+83            replace = True
+84        output_expression_container = exp.Create(
+85            this=exp.to_table(name),
+86            kind="TABLE",
+87            exists=exists,
+88            replace=replace,
+89        )
+90        return self.copy(_df=self._df.copy(output_expression_container=output_expression_container))
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects.html b/docs/sqlglot/dialects.html new file mode 100644 index 0000000..7a6c455 --- /dev/null +++ b/docs/sqlglot/dialects.html @@ -0,0 +1,400 @@ + + + + + + + sqlglot.dialects API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

Dialects

+ +

While there is a SQL standard, most SQL engines support a variation of that standard. This makes it difficult +to write portable SQL code. SQLGlot bridges all the different variations, called "dialects", with an extensible +SQL transpilation framework.

+ +

The base sqlglot.dialects.dialect.Dialect class implements a generic dialect that aims to be as universal as possible.

+ +

Each SQL variation has its own Dialect subclass, extending the corresponding Tokenizer, Parser and Generator +classes as needed.

+ +

Implementing a custom Dialect

+ +

Consider the following example:

+ +
+
from sqlglot import exp
+from sqlglot.dialects.dialect import Dialect
+from sqlglot.generator import Generator
+from sqlglot.tokens import Tokenizer, TokenType
+
+
+class Custom(Dialect):
+    class Tokenizer(Tokenizer):
+        QUOTES = ["'", '"']
+        IDENTIFIERS = ["`"]
+
+        KEYWORDS = {
+            **Tokenizer.KEYWORDS,
+            "INT64": TokenType.BIGINT,
+            "FLOAT64": TokenType.DOUBLE,
+        }
+
+    class Generator(Generator):
+        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}
+
+        TYPE_MAPPING = {
+            exp.DataType.Type.TINYINT: "INT64",
+            exp.DataType.Type.SMALLINT: "INT64",
+            exp.DataType.Type.INT: "INT64",
+            exp.DataType.Type.BIGINT: "INT64",
+            exp.DataType.Type.DECIMAL: "NUMERIC",
+            exp.DataType.Type.FLOAT: "FLOAT64",
+            exp.DataType.Type.DOUBLE: "FLOAT64",
+            exp.DataType.Type.BOOLEAN: "BOOL",
+            exp.DataType.Type.TEXT: "STRING",
+        }
+
+
+ +

This is a typical example of adding a new dialect implementation in SQLGlot: we specify its identifier and string +delimiters, as well as what tokens it uses for its types and how they're associated with SQLGlot types. Since +the Expression classes are common for each dialect supported in SQLGlot, we may also need to override the generation +logic for some expressions; this is usually done by adding new entries to the TRANSFORMS mapping.

+ +
+
+ + + + + +
 1"""
+ 2## Dialects
+ 3
+ 4While there is a SQL standard, most SQL engines support a variation of that standard. This makes it difficult
+ 5to write portable SQL code. SQLGlot bridges all the different variations, called "dialects", with an extensible
+ 6SQL transpilation framework. 
+ 7
+ 8The base `sqlglot.dialects.dialect.Dialect` class implements a generic dialect that aims to be as universal as possible.
+ 9
+10Each SQL variation has its own `Dialect` subclass, extending the corresponding `Tokenizer`, `Parser` and `Generator`
+11classes as needed.
+12
+13### Implementing a custom Dialect
+14
+15Consider the following example:
+16
+17```python
+18from sqlglot import exp
+19from sqlglot.dialects.dialect import Dialect
+20from sqlglot.generator import Generator
+21from sqlglot.tokens import Tokenizer, TokenType
+22
+23
+24class Custom(Dialect):
+25    class Tokenizer(Tokenizer):
+26        QUOTES = ["'", '"']
+27        IDENTIFIERS = ["`"]
+28
+29        KEYWORDS = {
+30            **Tokenizer.KEYWORDS,
+31            "INT64": TokenType.BIGINT,
+32            "FLOAT64": TokenType.DOUBLE,
+33        }
+34
+35    class Generator(Generator):
+36        TRANSFORMS = {exp.Array: lambda self, e: f"[{self.expressions(e)}]"}
+37
+38        TYPE_MAPPING = {
+39            exp.DataType.Type.TINYINT: "INT64",
+40            exp.DataType.Type.SMALLINT: "INT64",
+41            exp.DataType.Type.INT: "INT64",
+42            exp.DataType.Type.BIGINT: "INT64",
+43            exp.DataType.Type.DECIMAL: "NUMERIC",
+44            exp.DataType.Type.FLOAT: "FLOAT64",
+45            exp.DataType.Type.DOUBLE: "FLOAT64",
+46            exp.DataType.Type.BOOLEAN: "BOOL",
+47            exp.DataType.Type.TEXT: "STRING",
+48        }
+49```
+50
+51This is a typical example of adding a new dialect implementation in SQLGlot: we specify its identifier and string
+52delimiters, as well as what tokens it uses for its types and how they're associated with SQLGlot types. Since
+53the `Expression` classes are common for each dialect supported in SQLGlot, we may also need to override the generation
+54logic for some expressions; this is usually done by adding new entries to the `TRANSFORMS` mapping.
+55
+56----
+57"""
+58
+59from sqlglot.dialects.bigquery import BigQuery
+60from sqlglot.dialects.clickhouse import ClickHouse
+61from sqlglot.dialects.databricks import Databricks
+62from sqlglot.dialects.dialect import Dialect, Dialects
+63from sqlglot.dialects.drill import Drill
+64from sqlglot.dialects.duckdb import DuckDB
+65from sqlglot.dialects.hive import Hive
+66from sqlglot.dialects.mysql import MySQL
+67from sqlglot.dialects.oracle import Oracle
+68from sqlglot.dialects.postgres import Postgres
+69from sqlglot.dialects.presto import Presto
+70from sqlglot.dialects.redshift import Redshift
+71from sqlglot.dialects.snowflake import Snowflake
+72from sqlglot.dialects.spark import Spark
+73from sqlglot.dialects.sqlite import SQLite
+74from sqlglot.dialects.starrocks import StarRocks
+75from sqlglot.dialects.tableau import Tableau
+76from sqlglot.dialects.teradata import Teradata
+77from sqlglot.dialects.trino import Trino
+78from sqlglot.dialects.tsql import TSQL
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/bigquery.html b/docs/sqlglot/dialects/bigquery.html new file mode 100644 index 0000000..903fccb --- /dev/null +++ b/docs/sqlglot/dialects/bigquery.html @@ -0,0 +1,1434 @@ + + + + + + + sqlglot.dialects.bigquery API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

Supports BigQuery Standard SQL.

+
+ + + + + +
  1"""Supports BigQuery Standard SQL."""
+  2
+  3from __future__ import annotations
+  4
+  5import typing as t
+  6
+  7from sqlglot import exp, generator, parser, tokens, transforms
+  8from sqlglot.dialects.dialect import (
+  9    Dialect,
+ 10    datestrtodate_sql,
+ 11    inline_array_sql,
+ 12    no_ilike_sql,
+ 13    rename_func,
+ 14    timestrtotime_sql,
+ 15)
+ 16from sqlglot.helper import seq_get
+ 17from sqlglot.tokens import TokenType
+ 18
+ 19E = t.TypeVar("E", bound=exp.Expression)
+ 20
+ 21
+ 22def _date_add(expression_class: t.Type[E]) -> t.Callable[[t.Sequence], E]:
+ 23    def func(args):
+ 24        interval = seq_get(args, 1)
+ 25        return expression_class(
+ 26            this=seq_get(args, 0),
+ 27            expression=interval.this,
+ 28            unit=interval.args.get("unit"),
+ 29        )
+ 30
+ 31    return func
+ 32
+ 33
+ 34def _date_trunc(args: t.Sequence) -> exp.Expression:
+ 35    unit = seq_get(args, 1)
+ 36    if isinstance(unit, exp.Column):
+ 37        unit = exp.Var(this=unit.name)
+ 38    return exp.DateTrunc(this=seq_get(args, 0), expression=unit)
+ 39
+ 40
+ 41def _date_add_sql(
+ 42    data_type: str, kind: str
+ 43) -> t.Callable[[generator.Generator, exp.Expression], str]:
+ 44    def func(self, expression):
+ 45        this = self.sql(expression, "this")
+ 46        return f"{data_type}_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=expression.args.get('unit') or exp.Literal.string('day')))})"
+ 47
+ 48    return func
+ 49
+ 50
+ 51def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
+ 52    if not isinstance(expression.unnest().parent, exp.From):
+ 53        expression = t.cast(exp.Values, transforms.remove_precision_parameterized_types(expression))
+ 54        return self.values_sql(expression)
+ 55    rows = [tuple_exp.expressions for tuple_exp in expression.find_all(exp.Tuple)]
+ 56    structs = []
+ 57    for row in rows:
+ 58        aliases = [
+ 59            exp.alias_(value, column_name)
+ 60            for value, column_name in zip(row, expression.args["alias"].args["columns"])
+ 61        ]
+ 62        structs.append(exp.Struct(expressions=aliases))
+ 63    unnest_exp = exp.Unnest(expressions=[exp.Array(expressions=structs)])
+ 64    return self.unnest_sql(unnest_exp)
+ 65
+ 66
+ 67def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
+ 68    this = expression.this
+ 69    if isinstance(this, exp.Schema):
+ 70        this = f"{this.this} <{self.expressions(this)}>"
+ 71    else:
+ 72        this = self.sql(this)
+ 73    return f"RETURNS {this}"
+ 74
+ 75
+ 76def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
+ 77    kind = expression.args["kind"]
+ 78    returns = expression.find(exp.ReturnsProperty)
+ 79    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
+ 80        expression = expression.copy()
+ 81        expression.set("kind", "TABLE FUNCTION")
+ 82        if isinstance(
+ 83            expression.expression,
+ 84            (
+ 85                exp.Subquery,
+ 86                exp.Literal,
+ 87            ),
+ 88        ):
+ 89            expression.set("expression", expression.expression.this)
+ 90
+ 91        return self.create_sql(expression)
+ 92
+ 93    return self.create_sql(expression)
+ 94
+ 95
+ 96def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
+ 97    """Remove references to unnest table aliases since bigquery doesn't allow them.
+ 98
+ 99    These are added by the optimizer's qualify_column step.
+100    """
+101    if isinstance(expression, exp.Select):
+102        unnests = {
+103            unnest.alias
+104            for unnest in expression.args.get("from", exp.From(expressions=[])).expressions
+105            if isinstance(unnest, exp.Unnest) and unnest.alias
+106        }
+107
+108        if unnests:
+109            expression = expression.copy()
+110
+111            for select in expression.expressions:
+112                for column in select.find_all(exp.Column):
+113                    if column.table in unnests:
+114                        column.set("table", None)
+115
+116    return expression
+117
+118
+119class BigQuery(Dialect):
+120    unnest_column_only = True
+121    time_mapping = {
+122        "%M": "%-M",
+123        "%d": "%-d",
+124        "%m": "%-m",
+125        "%y": "%-y",
+126        "%H": "%-H",
+127        "%I": "%-I",
+128        "%S": "%-S",
+129        "%j": "%-j",
+130    }
+131
+132    class Tokenizer(tokens.Tokenizer):
+133        QUOTES = [
+134            (prefix + quote, quote) if prefix else quote
+135            for quote in ["'", '"', '"""', "'''"]
+136            for prefix in ["", "r", "R"]
+137        ]
+138        COMMENTS = ["--", "#", ("/*", "*/")]
+139        IDENTIFIERS = ["`"]
+140        STRING_ESCAPES = ["\\"]
+141        HEX_STRINGS = [("0x", ""), ("0X", "")]
+142
+143        KEYWORDS = {
+144            **tokens.Tokenizer.KEYWORDS,
+145            "BEGIN": TokenType.COMMAND,
+146            "BEGIN TRANSACTION": TokenType.BEGIN,
+147            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+148            "CURRENT_TIME": TokenType.CURRENT_TIME,
+149            "DECLARE": TokenType.COMMAND,
+150            "GEOGRAPHY": TokenType.GEOGRAPHY,
+151            "FLOAT64": TokenType.DOUBLE,
+152            "INT64": TokenType.BIGINT,
+153            "NOT DETERMINISTIC": TokenType.VOLATILE,
+154            "UNKNOWN": TokenType.NULL,
+155        }
+156        KEYWORDS.pop("DIV")
+157
+158    class Parser(parser.Parser):
+159        FUNCTIONS = {
+160            **parser.Parser.FUNCTIONS,  # type: ignore
+161            "DATE_TRUNC": _date_trunc,
+162            "DATE_ADD": _date_add(exp.DateAdd),
+163            "DATETIME_ADD": _date_add(exp.DatetimeAdd),
+164            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+165            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+166            "TIME_ADD": _date_add(exp.TimeAdd),
+167            "TIMESTAMP_ADD": _date_add(exp.TimestampAdd),
+168            "DATE_SUB": _date_add(exp.DateSub),
+169            "DATETIME_SUB": _date_add(exp.DatetimeSub),
+170            "TIME_SUB": _date_add(exp.TimeSub),
+171            "TIMESTAMP_SUB": _date_add(exp.TimestampSub),
+172            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
+173                this=seq_get(args, 1), format=seq_get(args, 0)
+174            ),
+175        }
+176
+177        FUNCTION_PARSERS = {
+178            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+179            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
+180        }
+181        FUNCTION_PARSERS.pop("TRIM")
+182
+183        NO_PAREN_FUNCTIONS = {
+184            **parser.Parser.NO_PAREN_FUNCTIONS,  # type: ignore
+185            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+186            TokenType.CURRENT_TIME: exp.CurrentTime,
+187        }
+188
+189        NESTED_TYPE_TOKENS = {
+190            *parser.Parser.NESTED_TYPE_TOKENS,  # type: ignore
+191            TokenType.TABLE,
+192        }
+193
+194        ID_VAR_TOKENS = {
+195            *parser.Parser.ID_VAR_TOKENS,  # type: ignore
+196            TokenType.VALUES,
+197        }
+198
+199        PROPERTY_PARSERS = {
+200            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+201            "NOT DETERMINISTIC": lambda self: self.expression(
+202                exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
+203            ),
+204        }
+205
+206    class Generator(generator.Generator):
+207        TRANSFORMS = {
+208            **generator.Generator.TRANSFORMS,  # type: ignore
+209            **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES,  # type: ignore
+210            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+211            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+212            exp.DateSub: _date_add_sql("DATE", "SUB"),
+213            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+214            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+215            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+216            exp.DateStrToDate: datestrtodate_sql,
+217            exp.GroupConcat: rename_func("STRING_AGG"),
+218            exp.ILike: no_ilike_sql,
+219            exp.IntDiv: rename_func("DIV"),
+220            exp.Select: transforms.preprocess(
+221                [_unqualify_unnest], transforms.delegate("select_sql")
+222            ),
+223            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+224            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+225            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+226            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+227            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+228            exp.TimeStrToTime: timestrtotime_sql,
+229            exp.VariancePop: rename_func("VAR_POP"),
+230            exp.Values: _derived_table_values_to_unnest,
+231            exp.ReturnsProperty: _returnsproperty_sql,
+232            exp.Create: _create_sql,
+233            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+234            exp.VolatilityProperty: lambda self, e: f"DETERMINISTIC"
+235            if e.name == "IMMUTABLE"
+236            else "NOT DETERMINISTIC",
+237            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+238        }
+239
+240        TYPE_MAPPING = {
+241            **generator.Generator.TYPE_MAPPING,  # type: ignore
+242            exp.DataType.Type.TINYINT: "INT64",
+243            exp.DataType.Type.SMALLINT: "INT64",
+244            exp.DataType.Type.INT: "INT64",
+245            exp.DataType.Type.BIGINT: "INT64",
+246            exp.DataType.Type.DECIMAL: "NUMERIC",
+247            exp.DataType.Type.FLOAT: "FLOAT64",
+248            exp.DataType.Type.DOUBLE: "FLOAT64",
+249            exp.DataType.Type.BOOLEAN: "BOOL",
+250            exp.DataType.Type.TEXT: "STRING",
+251            exp.DataType.Type.VARCHAR: "STRING",
+252            exp.DataType.Type.NVARCHAR: "STRING",
+253        }
+254
+255        EXPLICIT_UNION = True
+256
+257        def array_sql(self, expression: exp.Array) -> str:
+258            first_arg = seq_get(expression.expressions, 0)
+259            if isinstance(first_arg, exp.Subqueryable):
+260                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+261
+262            return inline_array_sql(self, expression)
+263
+264        def transaction_sql(self, *_) -> str:
+265            return "BEGIN TRANSACTION"
+266
+267        def commit_sql(self, *_) -> str:
+268            return "COMMIT TRANSACTION"
+269
+270        def rollback_sql(self, *_) -> str:
+271            return "ROLLBACK TRANSACTION"
+272
+273        def in_unnest_op(self, expression: exp.Unnest) -> str:
+274            return self.sql(expression)
+275
+276        def except_op(self, expression: exp.Except) -> str:
+277            if not expression.args.get("distinct", False):
+278                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+279            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+280
+281        def intersect_op(self, expression: exp.Intersect) -> str:
+282            if not expression.args.get("distinct", False):
+283                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+284            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+
+ + +
+
+ +
+ + class + BigQuery(sqlglot.dialects.dialect.Dialect): + + + +
+ +
120class BigQuery(Dialect):
+121    unnest_column_only = True
+122    time_mapping = {
+123        "%M": "%-M",
+124        "%d": "%-d",
+125        "%m": "%-m",
+126        "%y": "%-y",
+127        "%H": "%-H",
+128        "%I": "%-I",
+129        "%S": "%-S",
+130        "%j": "%-j",
+131    }
+132
+133    class Tokenizer(tokens.Tokenizer):
+134        QUOTES = [
+135            (prefix + quote, quote) if prefix else quote
+136            for quote in ["'", '"', '"""', "'''"]
+137            for prefix in ["", "r", "R"]
+138        ]
+139        COMMENTS = ["--", "#", ("/*", "*/")]
+140        IDENTIFIERS = ["`"]
+141        STRING_ESCAPES = ["\\"]
+142        HEX_STRINGS = [("0x", ""), ("0X", "")]
+143
+144        KEYWORDS = {
+145            **tokens.Tokenizer.KEYWORDS,
+146            "BEGIN": TokenType.COMMAND,
+147            "BEGIN TRANSACTION": TokenType.BEGIN,
+148            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+149            "CURRENT_TIME": TokenType.CURRENT_TIME,
+150            "DECLARE": TokenType.COMMAND,
+151            "GEOGRAPHY": TokenType.GEOGRAPHY,
+152            "FLOAT64": TokenType.DOUBLE,
+153            "INT64": TokenType.BIGINT,
+154            "NOT DETERMINISTIC": TokenType.VOLATILE,
+155            "UNKNOWN": TokenType.NULL,
+156        }
+157        KEYWORDS.pop("DIV")
+158
+159    class Parser(parser.Parser):
+160        FUNCTIONS = {
+161            **parser.Parser.FUNCTIONS,  # type: ignore
+162            "DATE_TRUNC": _date_trunc,
+163            "DATE_ADD": _date_add(exp.DateAdd),
+164            "DATETIME_ADD": _date_add(exp.DatetimeAdd),
+165            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+166            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+167            "TIME_ADD": _date_add(exp.TimeAdd),
+168            "TIMESTAMP_ADD": _date_add(exp.TimestampAdd),
+169            "DATE_SUB": _date_add(exp.DateSub),
+170            "DATETIME_SUB": _date_add(exp.DatetimeSub),
+171            "TIME_SUB": _date_add(exp.TimeSub),
+172            "TIMESTAMP_SUB": _date_add(exp.TimestampSub),
+173            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
+174                this=seq_get(args, 1), format=seq_get(args, 0)
+175            ),
+176        }
+177
+178        FUNCTION_PARSERS = {
+179            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+180            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
+181        }
+182        FUNCTION_PARSERS.pop("TRIM")
+183
+184        NO_PAREN_FUNCTIONS = {
+185            **parser.Parser.NO_PAREN_FUNCTIONS,  # type: ignore
+186            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+187            TokenType.CURRENT_TIME: exp.CurrentTime,
+188        }
+189
+190        NESTED_TYPE_TOKENS = {
+191            *parser.Parser.NESTED_TYPE_TOKENS,  # type: ignore
+192            TokenType.TABLE,
+193        }
+194
+195        ID_VAR_TOKENS = {
+196            *parser.Parser.ID_VAR_TOKENS,  # type: ignore
+197            TokenType.VALUES,
+198        }
+199
+200        PROPERTY_PARSERS = {
+201            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+202            "NOT DETERMINISTIC": lambda self: self.expression(
+203                exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
+204            ),
+205        }
+206
+207    class Generator(generator.Generator):
+208        TRANSFORMS = {
+209            **generator.Generator.TRANSFORMS,  # type: ignore
+210            **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES,  # type: ignore
+211            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+212            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+213            exp.DateSub: _date_add_sql("DATE", "SUB"),
+214            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+215            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+216            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+217            exp.DateStrToDate: datestrtodate_sql,
+218            exp.GroupConcat: rename_func("STRING_AGG"),
+219            exp.ILike: no_ilike_sql,
+220            exp.IntDiv: rename_func("DIV"),
+221            exp.Select: transforms.preprocess(
+222                [_unqualify_unnest], transforms.delegate("select_sql")
+223            ),
+224            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+225            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+226            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+227            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+228            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+229            exp.TimeStrToTime: timestrtotime_sql,
+230            exp.VariancePop: rename_func("VAR_POP"),
+231            exp.Values: _derived_table_values_to_unnest,
+232            exp.ReturnsProperty: _returnsproperty_sql,
+233            exp.Create: _create_sql,
+234            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+235            exp.VolatilityProperty: lambda self, e: f"DETERMINISTIC"
+236            if e.name == "IMMUTABLE"
+237            else "NOT DETERMINISTIC",
+238            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+239        }
+240
+241        TYPE_MAPPING = {
+242            **generator.Generator.TYPE_MAPPING,  # type: ignore
+243            exp.DataType.Type.TINYINT: "INT64",
+244            exp.DataType.Type.SMALLINT: "INT64",
+245            exp.DataType.Type.INT: "INT64",
+246            exp.DataType.Type.BIGINT: "INT64",
+247            exp.DataType.Type.DECIMAL: "NUMERIC",
+248            exp.DataType.Type.FLOAT: "FLOAT64",
+249            exp.DataType.Type.DOUBLE: "FLOAT64",
+250            exp.DataType.Type.BOOLEAN: "BOOL",
+251            exp.DataType.Type.TEXT: "STRING",
+252            exp.DataType.Type.VARCHAR: "STRING",
+253            exp.DataType.Type.NVARCHAR: "STRING",
+254        }
+255
+256        EXPLICIT_UNION = True
+257
+258        def array_sql(self, expression: exp.Array) -> str:
+259            first_arg = seq_get(expression.expressions, 0)
+260            if isinstance(first_arg, exp.Subqueryable):
+261                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+262
+263            return inline_array_sql(self, expression)
+264
+265        def transaction_sql(self, *_) -> str:
+266            return "BEGIN TRANSACTION"
+267
+268        def commit_sql(self, *_) -> str:
+269            return "COMMIT TRANSACTION"
+270
+271        def rollback_sql(self, *_) -> str:
+272            return "ROLLBACK TRANSACTION"
+273
+274        def in_unnest_op(self, expression: exp.Unnest) -> str:
+275            return self.sql(expression)
+276
+277        def except_op(self, expression: exp.Except) -> str:
+278            if not expression.args.get("distinct", False):
+279                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+280            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+281
+282        def intersect_op(self, expression: exp.Intersect) -> str:
+283            if not expression.args.get("distinct", False):
+284                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+285            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+
+ + + + +
+
+ + BigQuery() + + +
+ + + + +
+ +
+
+ +
+ + class + BigQuery.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
133    class Tokenizer(tokens.Tokenizer):
+134        QUOTES = [
+135            (prefix + quote, quote) if prefix else quote
+136            for quote in ["'", '"', '"""', "'''"]
+137            for prefix in ["", "r", "R"]
+138        ]
+139        COMMENTS = ["--", "#", ("/*", "*/")]
+140        IDENTIFIERS = ["`"]
+141        STRING_ESCAPES = ["\\"]
+142        HEX_STRINGS = [("0x", ""), ("0X", "")]
+143
+144        KEYWORDS = {
+145            **tokens.Tokenizer.KEYWORDS,
+146            "BEGIN": TokenType.COMMAND,
+147            "BEGIN TRANSACTION": TokenType.BEGIN,
+148            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+149            "CURRENT_TIME": TokenType.CURRENT_TIME,
+150            "DECLARE": TokenType.COMMAND,
+151            "GEOGRAPHY": TokenType.GEOGRAPHY,
+152            "FLOAT64": TokenType.DOUBLE,
+153            "INT64": TokenType.BIGINT,
+154            "NOT DETERMINISTIC": TokenType.VOLATILE,
+155            "UNKNOWN": TokenType.NULL,
+156        }
+157        KEYWORDS.pop("DIV")
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + BigQuery.Parser(sqlglot.parser.Parser): + + + +
+ +
159    class Parser(parser.Parser):
+160        FUNCTIONS = {
+161            **parser.Parser.FUNCTIONS,  # type: ignore
+162            "DATE_TRUNC": _date_trunc,
+163            "DATE_ADD": _date_add(exp.DateAdd),
+164            "DATETIME_ADD": _date_add(exp.DatetimeAdd),
+165            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+166            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+167            "TIME_ADD": _date_add(exp.TimeAdd),
+168            "TIMESTAMP_ADD": _date_add(exp.TimestampAdd),
+169            "DATE_SUB": _date_add(exp.DateSub),
+170            "DATETIME_SUB": _date_add(exp.DatetimeSub),
+171            "TIME_SUB": _date_add(exp.TimeSub),
+172            "TIMESTAMP_SUB": _date_add(exp.TimestampSub),
+173            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
+174                this=seq_get(args, 1), format=seq_get(args, 0)
+175            ),
+176        }
+177
+178        FUNCTION_PARSERS = {
+179            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+180            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
+181        }
+182        FUNCTION_PARSERS.pop("TRIM")
+183
+184        NO_PAREN_FUNCTIONS = {
+185            **parser.Parser.NO_PAREN_FUNCTIONS,  # type: ignore
+186            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+187            TokenType.CURRENT_TIME: exp.CurrentTime,
+188        }
+189
+190        NESTED_TYPE_TOKENS = {
+191            *parser.Parser.NESTED_TYPE_TOKENS,  # type: ignore
+192            TokenType.TABLE,
+193        }
+194
+195        ID_VAR_TOKENS = {
+196            *parser.Parser.ID_VAR_TOKENS,  # type: ignore
+197            TokenType.VALUES,
+198        }
+199
+200        PROPERTY_PARSERS = {
+201            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+202            "NOT DETERMINISTIC": lambda self: self.expression(
+203                exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
+204            ),
+205        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + BigQuery.Generator(sqlglot.generator.Generator): + + + +
+ +
207    class Generator(generator.Generator):
+208        TRANSFORMS = {
+209            **generator.Generator.TRANSFORMS,  # type: ignore
+210            **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES,  # type: ignore
+211            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+212            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+213            exp.DateSub: _date_add_sql("DATE", "SUB"),
+214            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+215            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+216            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+217            exp.DateStrToDate: datestrtodate_sql,
+218            exp.GroupConcat: rename_func("STRING_AGG"),
+219            exp.ILike: no_ilike_sql,
+220            exp.IntDiv: rename_func("DIV"),
+221            exp.Select: transforms.preprocess(
+222                [_unqualify_unnest], transforms.delegate("select_sql")
+223            ),
+224            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
+225            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+226            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+227            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+228            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+229            exp.TimeStrToTime: timestrtotime_sql,
+230            exp.VariancePop: rename_func("VAR_POP"),
+231            exp.Values: _derived_table_values_to_unnest,
+232            exp.ReturnsProperty: _returnsproperty_sql,
+233            exp.Create: _create_sql,
+234            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+235            exp.VolatilityProperty: lambda self, e: f"DETERMINISTIC"
+236            if e.name == "IMMUTABLE"
+237            else "NOT DETERMINISTIC",
+238            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+239        }
+240
+241        TYPE_MAPPING = {
+242            **generator.Generator.TYPE_MAPPING,  # type: ignore
+243            exp.DataType.Type.TINYINT: "INT64",
+244            exp.DataType.Type.SMALLINT: "INT64",
+245            exp.DataType.Type.INT: "INT64",
+246            exp.DataType.Type.BIGINT: "INT64",
+247            exp.DataType.Type.DECIMAL: "NUMERIC",
+248            exp.DataType.Type.FLOAT: "FLOAT64",
+249            exp.DataType.Type.DOUBLE: "FLOAT64",
+250            exp.DataType.Type.BOOLEAN: "BOOL",
+251            exp.DataType.Type.TEXT: "STRING",
+252            exp.DataType.Type.VARCHAR: "STRING",
+253            exp.DataType.Type.NVARCHAR: "STRING",
+254        }
+255
+256        EXPLICIT_UNION = True
+257
+258        def array_sql(self, expression: exp.Array) -> str:
+259            first_arg = seq_get(expression.expressions, 0)
+260            if isinstance(first_arg, exp.Subqueryable):
+261                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+262
+263            return inline_array_sql(self, expression)
+264
+265        def transaction_sql(self, *_) -> str:
+266            return "BEGIN TRANSACTION"
+267
+268        def commit_sql(self, *_) -> str:
+269            return "COMMIT TRANSACTION"
+270
+271        def rollback_sql(self, *_) -> str:
+272            return "ROLLBACK TRANSACTION"
+273
+274        def in_unnest_op(self, expression: exp.Unnest) -> str:
+275            return self.sql(expression)
+276
+277        def except_op(self, expression: exp.Except) -> str:
+278            if not expression.args.get("distinct", False):
+279                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+280            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+281
+282        def intersect_op(self, expression: exp.Intersect) -> str:
+283            if not expression.args.get("distinct", False):
+284                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+285            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + array_sql(self, expression: sqlglot.expressions.Array) -> str: + + + +
+ +
258        def array_sql(self, expression: exp.Array) -> str:
+259            first_arg = seq_get(expression.expressions, 0)
+260            if isinstance(first_arg, exp.Subqueryable):
+261                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+262
+263            return inline_array_sql(self, expression)
+
+ + + + +
+
+ +
+ + def + transaction_sql(self, *_) -> str: + + + +
+ +
265        def transaction_sql(self, *_) -> str:
+266            return "BEGIN TRANSACTION"
+
+ + + + +
+
+ +
+ + def + commit_sql(self, *_) -> str: + + + +
+ +
268        def commit_sql(self, *_) -> str:
+269            return "COMMIT TRANSACTION"
+
+ + + + +
+
+ +
+ + def + rollback_sql(self, *_) -> str: + + + +
+ +
271        def rollback_sql(self, *_) -> str:
+272            return "ROLLBACK TRANSACTION"
+
+ + + + +
+
+ +
+ + def + in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str: + + + +
+ +
274        def in_unnest_op(self, expression: exp.Unnest) -> str:
+275            return self.sql(expression)
+
+ + + + +
+
+ +
+ + def + except_op(self, expression: sqlglot.expressions.Except) -> str: + + + +
+ +
277        def except_op(self, expression: exp.Except) -> str:
+278            if not expression.args.get("distinct", False):
+279                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+280            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+
+ + + + +
+
+ +
+ + def + intersect_op(self, expression: sqlglot.expressions.Intersect) -> str: + + + +
+ +
282        def intersect_op(self, expression: exp.Intersect) -> str:
+283            if not expression.args.get("distinct", False):
+284                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+285            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/clickhouse.html b/docs/sqlglot/dialects/clickhouse.html new file mode 100644 index 0000000..06bc3cd --- /dev/null +++ b/docs/sqlglot/dialects/clickhouse.html @@ -0,0 +1,1077 @@ + + + + + + + sqlglot.dialects.clickhouse API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.clickhouse

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4
+  5from sqlglot import exp, generator, parser, tokens
+  6from sqlglot.dialects.dialect import Dialect, inline_array_sql, var_map_sql
+  7from sqlglot.errors import ParseError
+  8from sqlglot.parser import parse_var_map
+  9from sqlglot.tokens import TokenType
+ 10
+ 11
+ 12def _lower_func(sql: str) -> str:
+ 13    index = sql.index("(")
+ 14    return sql[:index].lower() + sql[index:]
+ 15
+ 16
+ 17class ClickHouse(Dialect):
+ 18    normalize_functions = None
+ 19    null_ordering = "nulls_are_last"
+ 20
+ 21    class Tokenizer(tokens.Tokenizer):
+ 22        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
+ 23        IDENTIFIERS = ['"', "`"]
+ 24
+ 25        KEYWORDS = {
+ 26            **tokens.Tokenizer.KEYWORDS,
+ 27            "ASOF": TokenType.ASOF,
+ 28            "GLOBAL": TokenType.GLOBAL,
+ 29            "DATETIME64": TokenType.DATETIME,
+ 30            "FINAL": TokenType.FINAL,
+ 31            "FLOAT32": TokenType.FLOAT,
+ 32            "FLOAT64": TokenType.DOUBLE,
+ 33            "INT16": TokenType.SMALLINT,
+ 34            "INT32": TokenType.INT,
+ 35            "INT64": TokenType.BIGINT,
+ 36            "INT8": TokenType.TINYINT,
+ 37            "TUPLE": TokenType.STRUCT,
+ 38        }
+ 39
+ 40    class Parser(parser.Parser):
+ 41        FUNCTIONS = {
+ 42            **parser.Parser.FUNCTIONS,  # type: ignore
+ 43            "MAP": parse_var_map,
+ 44            "QUANTILE": lambda params, args: exp.Quantile(this=args, quantile=params),
+ 45            "QUANTILES": lambda params, args: exp.Quantiles(parameters=params, expressions=args),
+ 46            "QUANTILEIF": lambda params, args: exp.QuantileIf(parameters=params, expressions=args),
+ 47        }
+ 48
+ 49        RANGE_PARSERS = {
+ 50            **parser.Parser.RANGE_PARSERS,
+ 51            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
+ 52            and self._parse_in(this, is_global=True),
+ 53        }
+ 54
+ 55        JOIN_KINDS = {*parser.Parser.JOIN_KINDS, TokenType.ANY, TokenType.ASOF}  # type: ignore
+ 56
+ 57        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {TokenType.ANY}  # type: ignore
+ 58
+ 59        def _parse_in(
+ 60            self, this: t.Optional[exp.Expression], is_global: bool = False
+ 61        ) -> exp.Expression:
+ 62            this = super()._parse_in(this)
+ 63            this.set("is_global", is_global)
+ 64            return this
+ 65
+ 66        def _parse_table(
+ 67            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
+ 68        ) -> t.Optional[exp.Expression]:
+ 69            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
+ 70
+ 71            if self._match(TokenType.FINAL):
+ 72                this = self.expression(exp.Final, this=this)
+ 73
+ 74            return this
+ 75
+ 76        def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
+ 77            return super()._parse_position(haystack_first=True)
+ 78
+ 79        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
+ 80        def _parse_cte(self) -> exp.Expression:
+ 81            index = self._index
+ 82            try:
+ 83                # WITH <identifier> AS <subquery expression>
+ 84                return super()._parse_cte()
+ 85            except ParseError:
+ 86                # WITH <expression> AS <identifier>
+ 87                self._retreat(index)
+ 88                statement = self._parse_statement()
+ 89
+ 90                if statement and isinstance(statement.this, exp.Alias):
+ 91                    self.raise_error("Expected CTE to have alias")
+ 92
+ 93                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
+ 94
+ 95    class Generator(generator.Generator):
+ 96        STRUCT_DELIMITER = ("(", ")")
+ 97
+ 98        TYPE_MAPPING = {
+ 99            **generator.Generator.TYPE_MAPPING,  # type: ignore
+100            exp.DataType.Type.NULLABLE: "Nullable",
+101            exp.DataType.Type.DATETIME: "DateTime64",
+102            exp.DataType.Type.MAP: "Map",
+103            exp.DataType.Type.ARRAY: "Array",
+104            exp.DataType.Type.STRUCT: "Tuple",
+105            exp.DataType.Type.TINYINT: "Int8",
+106            exp.DataType.Type.SMALLINT: "Int16",
+107            exp.DataType.Type.INT: "Int32",
+108            exp.DataType.Type.BIGINT: "Int64",
+109            exp.DataType.Type.FLOAT: "Float32",
+110            exp.DataType.Type.DOUBLE: "Float64",
+111        }
+112
+113        TRANSFORMS = {
+114            **generator.Generator.TRANSFORMS,  # type: ignore
+115            exp.Array: inline_array_sql,
+116            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
+117            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
+118            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
+119            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
+120            exp.Quantile: lambda self, e: f"quantile{self._param_args_sql(e, 'quantile', 'this')}",
+121            exp.Quantiles: lambda self, e: f"quantiles{self._param_args_sql(e, 'parameters', 'expressions')}",
+122            exp.QuantileIf: lambda self, e: f"quantileIf{self._param_args_sql(e, 'parameters', 'expressions')}",
+123        }
+124
+125        EXPLICIT_UNION = True
+126
+127        def _param_args_sql(
+128            self, expression: exp.Expression, params_name: str, args_name: str
+129        ) -> str:
+130            params = self.format_args(self.expressions(expression, params_name))
+131            args = self.format_args(self.expressions(expression, args_name))
+132            return f"({params})({args})"
+133
+134        def cte_sql(self, expression: exp.CTE) -> str:
+135            if isinstance(expression.this, exp.Alias):
+136                return self.sql(expression, "this")
+137
+138            return super().cte_sql(expression)
+
+ + +
+
+ +
+ + class + ClickHouse(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 18class ClickHouse(Dialect):
+ 19    normalize_functions = None
+ 20    null_ordering = "nulls_are_last"
+ 21
+ 22    class Tokenizer(tokens.Tokenizer):
+ 23        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
+ 24        IDENTIFIERS = ['"', "`"]
+ 25
+ 26        KEYWORDS = {
+ 27            **tokens.Tokenizer.KEYWORDS,
+ 28            "ASOF": TokenType.ASOF,
+ 29            "GLOBAL": TokenType.GLOBAL,
+ 30            "DATETIME64": TokenType.DATETIME,
+ 31            "FINAL": TokenType.FINAL,
+ 32            "FLOAT32": TokenType.FLOAT,
+ 33            "FLOAT64": TokenType.DOUBLE,
+ 34            "INT16": TokenType.SMALLINT,
+ 35            "INT32": TokenType.INT,
+ 36            "INT64": TokenType.BIGINT,
+ 37            "INT8": TokenType.TINYINT,
+ 38            "TUPLE": TokenType.STRUCT,
+ 39        }
+ 40
+ 41    class Parser(parser.Parser):
+ 42        FUNCTIONS = {
+ 43            **parser.Parser.FUNCTIONS,  # type: ignore
+ 44            "MAP": parse_var_map,
+ 45            "QUANTILE": lambda params, args: exp.Quantile(this=args, quantile=params),
+ 46            "QUANTILES": lambda params, args: exp.Quantiles(parameters=params, expressions=args),
+ 47            "QUANTILEIF": lambda params, args: exp.QuantileIf(parameters=params, expressions=args),
+ 48        }
+ 49
+ 50        RANGE_PARSERS = {
+ 51            **parser.Parser.RANGE_PARSERS,
+ 52            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
+ 53            and self._parse_in(this, is_global=True),
+ 54        }
+ 55
+ 56        JOIN_KINDS = {*parser.Parser.JOIN_KINDS, TokenType.ANY, TokenType.ASOF}  # type: ignore
+ 57
+ 58        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {TokenType.ANY}  # type: ignore
+ 59
+ 60        def _parse_in(
+ 61            self, this: t.Optional[exp.Expression], is_global: bool = False
+ 62        ) -> exp.Expression:
+ 63            this = super()._parse_in(this)
+ 64            this.set("is_global", is_global)
+ 65            return this
+ 66
+ 67        def _parse_table(
+ 68            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
+ 69        ) -> t.Optional[exp.Expression]:
+ 70            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
+ 71
+ 72            if self._match(TokenType.FINAL):
+ 73                this = self.expression(exp.Final, this=this)
+ 74
+ 75            return this
+ 76
+ 77        def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
+ 78            return super()._parse_position(haystack_first=True)
+ 79
+ 80        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
+ 81        def _parse_cte(self) -> exp.Expression:
+ 82            index = self._index
+ 83            try:
+ 84                # WITH <identifier> AS <subquery expression>
+ 85                return super()._parse_cte()
+ 86            except ParseError:
+ 87                # WITH <expression> AS <identifier>
+ 88                self._retreat(index)
+ 89                statement = self._parse_statement()
+ 90
+ 91                if statement and isinstance(statement.this, exp.Alias):
+ 92                    self.raise_error("Expected CTE to have alias")
+ 93
+ 94                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
+ 95
+ 96    class Generator(generator.Generator):
+ 97        STRUCT_DELIMITER = ("(", ")")
+ 98
+ 99        TYPE_MAPPING = {
+100            **generator.Generator.TYPE_MAPPING,  # type: ignore
+101            exp.DataType.Type.NULLABLE: "Nullable",
+102            exp.DataType.Type.DATETIME: "DateTime64",
+103            exp.DataType.Type.MAP: "Map",
+104            exp.DataType.Type.ARRAY: "Array",
+105            exp.DataType.Type.STRUCT: "Tuple",
+106            exp.DataType.Type.TINYINT: "Int8",
+107            exp.DataType.Type.SMALLINT: "Int16",
+108            exp.DataType.Type.INT: "Int32",
+109            exp.DataType.Type.BIGINT: "Int64",
+110            exp.DataType.Type.FLOAT: "Float32",
+111            exp.DataType.Type.DOUBLE: "Float64",
+112        }
+113
+114        TRANSFORMS = {
+115            **generator.Generator.TRANSFORMS,  # type: ignore
+116            exp.Array: inline_array_sql,
+117            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
+118            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
+119            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
+120            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
+121            exp.Quantile: lambda self, e: f"quantile{self._param_args_sql(e, 'quantile', 'this')}",
+122            exp.Quantiles: lambda self, e: f"quantiles{self._param_args_sql(e, 'parameters', 'expressions')}",
+123            exp.QuantileIf: lambda self, e: f"quantileIf{self._param_args_sql(e, 'parameters', 'expressions')}",
+124        }
+125
+126        EXPLICIT_UNION = True
+127
+128        def _param_args_sql(
+129            self, expression: exp.Expression, params_name: str, args_name: str
+130        ) -> str:
+131            params = self.format_args(self.expressions(expression, params_name))
+132            args = self.format_args(self.expressions(expression, args_name))
+133            return f"({params})({args})"
+134
+135        def cte_sql(self, expression: exp.CTE) -> str:
+136            if isinstance(expression.this, exp.Alias):
+137                return self.sql(expression, "this")
+138
+139            return super().cte_sql(expression)
+
+ + + + +
+
+ + ClickHouse() + + +
+ + + + +
+ +
+
+ +
+ + class + ClickHouse.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
22    class Tokenizer(tokens.Tokenizer):
+23        COMMENTS = ["--", "#", "#!", ("/*", "*/")]
+24        IDENTIFIERS = ['"', "`"]
+25
+26        KEYWORDS = {
+27            **tokens.Tokenizer.KEYWORDS,
+28            "ASOF": TokenType.ASOF,
+29            "GLOBAL": TokenType.GLOBAL,
+30            "DATETIME64": TokenType.DATETIME,
+31            "FINAL": TokenType.FINAL,
+32            "FLOAT32": TokenType.FLOAT,
+33            "FLOAT64": TokenType.DOUBLE,
+34            "INT16": TokenType.SMALLINT,
+35            "INT32": TokenType.INT,
+36            "INT64": TokenType.BIGINT,
+37            "INT8": TokenType.TINYINT,
+38            "TUPLE": TokenType.STRUCT,
+39        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + ClickHouse.Parser(sqlglot.parser.Parser): + + + +
+ +
41    class Parser(parser.Parser):
+42        FUNCTIONS = {
+43            **parser.Parser.FUNCTIONS,  # type: ignore
+44            "MAP": parse_var_map,
+45            "QUANTILE": lambda params, args: exp.Quantile(this=args, quantile=params),
+46            "QUANTILES": lambda params, args: exp.Quantiles(parameters=params, expressions=args),
+47            "QUANTILEIF": lambda params, args: exp.QuantileIf(parameters=params, expressions=args),
+48        }
+49
+50        RANGE_PARSERS = {
+51            **parser.Parser.RANGE_PARSERS,
+52            TokenType.GLOBAL: lambda self, this: self._match(TokenType.IN)
+53            and self._parse_in(this, is_global=True),
+54        }
+55
+56        JOIN_KINDS = {*parser.Parser.JOIN_KINDS, TokenType.ANY, TokenType.ASOF}  # type: ignore
+57
+58        TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {TokenType.ANY}  # type: ignore
+59
+60        def _parse_in(
+61            self, this: t.Optional[exp.Expression], is_global: bool = False
+62        ) -> exp.Expression:
+63            this = super()._parse_in(this)
+64            this.set("is_global", is_global)
+65            return this
+66
+67        def _parse_table(
+68            self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
+69        ) -> t.Optional[exp.Expression]:
+70            this = super()._parse_table(schema=schema, alias_tokens=alias_tokens)
+71
+72            if self._match(TokenType.FINAL):
+73                this = self.expression(exp.Final, this=this)
+74
+75            return this
+76
+77        def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
+78            return super()._parse_position(haystack_first=True)
+79
+80        # https://clickhouse.com/docs/en/sql-reference/statements/select/with/
+81        def _parse_cte(self) -> exp.Expression:
+82            index = self._index
+83            try:
+84                # WITH <identifier> AS <subquery expression>
+85                return super()._parse_cte()
+86            except ParseError:
+87                # WITH <expression> AS <identifier>
+88                self._retreat(index)
+89                statement = self._parse_statement()
+90
+91                if statement and isinstance(statement.this, exp.Alias):
+92                    self.raise_error("Expected CTE to have alias")
+93
+94                return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + ClickHouse.Generator(sqlglot.generator.Generator): + + + +
+ +
 96    class Generator(generator.Generator):
+ 97        STRUCT_DELIMITER = ("(", ")")
+ 98
+ 99        TYPE_MAPPING = {
+100            **generator.Generator.TYPE_MAPPING,  # type: ignore
+101            exp.DataType.Type.NULLABLE: "Nullable",
+102            exp.DataType.Type.DATETIME: "DateTime64",
+103            exp.DataType.Type.MAP: "Map",
+104            exp.DataType.Type.ARRAY: "Array",
+105            exp.DataType.Type.STRUCT: "Tuple",
+106            exp.DataType.Type.TINYINT: "Int8",
+107            exp.DataType.Type.SMALLINT: "Int16",
+108            exp.DataType.Type.INT: "Int32",
+109            exp.DataType.Type.BIGINT: "Int64",
+110            exp.DataType.Type.FLOAT: "Float32",
+111            exp.DataType.Type.DOUBLE: "Float64",
+112        }
+113
+114        TRANSFORMS = {
+115            **generator.Generator.TRANSFORMS,  # type: ignore
+116            exp.Array: inline_array_sql,
+117            exp.StrPosition: lambda self, e: f"position({self.format_args(e.this, e.args.get('substr'), e.args.get('position'))})",
+118            exp.Final: lambda self, e: f"{self.sql(e, 'this')} FINAL",
+119            exp.Map: lambda self, e: _lower_func(var_map_sql(self, e)),
+120            exp.VarMap: lambda self, e: _lower_func(var_map_sql(self, e)),
+121            exp.Quantile: lambda self, e: f"quantile{self._param_args_sql(e, 'quantile', 'this')}",
+122            exp.Quantiles: lambda self, e: f"quantiles{self._param_args_sql(e, 'parameters', 'expressions')}",
+123            exp.QuantileIf: lambda self, e: f"quantileIf{self._param_args_sql(e, 'parameters', 'expressions')}",
+124        }
+125
+126        EXPLICIT_UNION = True
+127
+128        def _param_args_sql(
+129            self, expression: exp.Expression, params_name: str, args_name: str
+130        ) -> str:
+131            params = self.format_args(self.expressions(expression, params_name))
+132            args = self.format_args(self.expressions(expression, args_name))
+133            return f"({params})({args})"
+134
+135        def cte_sql(self, expression: exp.CTE) -> str:
+136            if isinstance(expression.this, exp.Alias):
+137                return self.sql(expression, "this")
+138
+139            return super().cte_sql(expression)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + cte_sql(self, expression: sqlglot.expressions.CTE) -> str: + + + +
+ +
135        def cte_sql(self, expression: exp.CTE) -> str:
+136            if isinstance(expression.this, exp.Alias):
+137                return self.sql(expression, "this")
+138
+139            return super().cte_sql(expression)
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/databricks.html b/docs/sqlglot/dialects/databricks.html new file mode 100644 index 0000000..74464c8 --- /dev/null +++ b/docs/sqlglot/dialects/databricks.html @@ -0,0 +1,704 @@ + + + + + + + sqlglot.dialects.databricks API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.databricks

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3from sqlglot import exp
+ 4from sqlglot.dialects.dialect import parse_date_delta
+ 5from sqlglot.dialects.spark import Spark
+ 6from sqlglot.dialects.tsql import generate_date_delta_with_unit_sql
+ 7
+ 8
+ 9class Databricks(Spark):
+10    class Parser(Spark.Parser):
+11        FUNCTIONS = {
+12            **Spark.Parser.FUNCTIONS,
+13            "DATEADD": parse_date_delta(exp.DateAdd),
+14            "DATE_ADD": parse_date_delta(exp.DateAdd),
+15            "DATEDIFF": parse_date_delta(exp.DateDiff),
+16        }
+17
+18    class Generator(Spark.Generator):
+19        TRANSFORMS = {
+20            **Spark.Generator.TRANSFORMS,  # type: ignore
+21            exp.DateAdd: generate_date_delta_with_unit_sql,
+22            exp.DateDiff: generate_date_delta_with_unit_sql,
+23        }
+
+ + +
+
+ +
+ + class + Databricks(sqlglot.dialects.spark.Spark): + + + +
+ +
10class Databricks(Spark):
+11    class Parser(Spark.Parser):
+12        FUNCTIONS = {
+13            **Spark.Parser.FUNCTIONS,
+14            "DATEADD": parse_date_delta(exp.DateAdd),
+15            "DATE_ADD": parse_date_delta(exp.DateAdd),
+16            "DATEDIFF": parse_date_delta(exp.DateDiff),
+17        }
+18
+19    class Generator(Spark.Generator):
+20        TRANSFORMS = {
+21            **Spark.Generator.TRANSFORMS,  # type: ignore
+22            exp.DateAdd: generate_date_delta_with_unit_sql,
+23            exp.DateDiff: generate_date_delta_with_unit_sql,
+24        }
+
+ + + + +
+
+ + Databricks() + + +
+ + + + +
+ +
+
+ +
+ + class + Databricks.Parser(sqlglot.dialects.spark.Spark.Parser): + + + +
+ +
11    class Parser(Spark.Parser):
+12        FUNCTIONS = {
+13            **Spark.Parser.FUNCTIONS,
+14            "DATEADD": parse_date_delta(exp.DateAdd),
+15            "DATE_ADD": parse_date_delta(exp.DateAdd),
+16            "DATEDIFF": parse_date_delta(exp.DateDiff),
+17        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Databricks.Generator(sqlglot.dialects.spark.Spark.Generator): + + + +
+ +
19    class Generator(Spark.Generator):
+20        TRANSFORMS = {
+21            **Spark.Generator.TRANSFORMS,  # type: ignore
+22            exp.DateAdd: generate_date_delta_with_unit_sql,
+23            exp.DateDiff: generate_date_delta_with_unit_sql,
+24        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+ + +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/dialect.html b/docs/sqlglot/dialects/dialect.html new file mode 100644 index 0000000..8cf5456 --- /dev/null +++ b/docs/sqlglot/dialects/dialect.html @@ -0,0 +1,2134 @@ + + + + + + + sqlglot.dialects.dialect API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.dialect

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4from enum import Enum
+  5
+  6from sqlglot import exp
+  7from sqlglot.generator import Generator
+  8from sqlglot.helper import flatten, seq_get
+  9from sqlglot.parser import Parser
+ 10from sqlglot.time import format_time
+ 11from sqlglot.tokens import Tokenizer
+ 12from sqlglot.trie import new_trie
+ 13
+ 14E = t.TypeVar("E", bound=exp.Expression)
+ 15
+ 16
+ 17class Dialects(str, Enum):
+ 18    DIALECT = ""
+ 19
+ 20    BIGQUERY = "bigquery"
+ 21    CLICKHOUSE = "clickhouse"
+ 22    DUCKDB = "duckdb"
+ 23    HIVE = "hive"
+ 24    MYSQL = "mysql"
+ 25    ORACLE = "oracle"
+ 26    POSTGRES = "postgres"
+ 27    PRESTO = "presto"
+ 28    REDSHIFT = "redshift"
+ 29    SNOWFLAKE = "snowflake"
+ 30    SPARK = "spark"
+ 31    SQLITE = "sqlite"
+ 32    STARROCKS = "starrocks"
+ 33    TABLEAU = "tableau"
+ 34    TRINO = "trino"
+ 35    TSQL = "tsql"
+ 36    DATABRICKS = "databricks"
+ 37    DRILL = "drill"
+ 38    TERADATA = "teradata"
+ 39
+ 40
+ 41class _Dialect(type):
+ 42    classes: t.Dict[str, t.Type[Dialect]] = {}
+ 43
+ 44    @classmethod
+ 45    def __getitem__(cls, key: str) -> t.Type[Dialect]:
+ 46        return cls.classes[key]
+ 47
+ 48    @classmethod
+ 49    def get(
+ 50        cls, key: str, default: t.Optional[t.Type[Dialect]] = None
+ 51    ) -> t.Optional[t.Type[Dialect]]:
+ 52        return cls.classes.get(key, default)
+ 53
+ 54    def __new__(cls, clsname, bases, attrs):
+ 55        klass = super().__new__(cls, clsname, bases, attrs)
+ 56        enum = Dialects.__members__.get(clsname.upper())
+ 57        cls.classes[enum.value if enum is not None else clsname.lower()] = klass
+ 58
+ 59        klass.time_trie = new_trie(klass.time_mapping)
+ 60        klass.inverse_time_mapping = {v: k for k, v in klass.time_mapping.items()}
+ 61        klass.inverse_time_trie = new_trie(klass.inverse_time_mapping)
+ 62
+ 63        klass.tokenizer_class = getattr(klass, "Tokenizer", Tokenizer)
+ 64        klass.parser_class = getattr(klass, "Parser", Parser)
+ 65        klass.generator_class = getattr(klass, "Generator", Generator)
+ 66
+ 67        klass.quote_start, klass.quote_end = list(klass.tokenizer_class._QUOTES.items())[0]
+ 68        klass.identifier_start, klass.identifier_end = list(
+ 69            klass.tokenizer_class._IDENTIFIERS.items()
+ 70        )[0]
+ 71
+ 72        if (
+ 73            klass.tokenizer_class._BIT_STRINGS
+ 74            and exp.BitString not in klass.generator_class.TRANSFORMS
+ 75        ):
+ 76            bs_start, bs_end = list(klass.tokenizer_class._BIT_STRINGS.items())[0]
+ 77            klass.generator_class.TRANSFORMS[
+ 78                exp.BitString
+ 79            ] = lambda self, e: f"{bs_start}{int(self.sql(e, 'this')):b}{bs_end}"
+ 80        if (
+ 81            klass.tokenizer_class._HEX_STRINGS
+ 82            and exp.HexString not in klass.generator_class.TRANSFORMS
+ 83        ):
+ 84            hs_start, hs_end = list(klass.tokenizer_class._HEX_STRINGS.items())[0]
+ 85            klass.generator_class.TRANSFORMS[
+ 86                exp.HexString
+ 87            ] = lambda self, e: f"{hs_start}{int(self.sql(e, 'this')):X}{hs_end}"
+ 88        if (
+ 89            klass.tokenizer_class._BYTE_STRINGS
+ 90            and exp.ByteString not in klass.generator_class.TRANSFORMS
+ 91        ):
+ 92            be_start, be_end = list(klass.tokenizer_class._BYTE_STRINGS.items())[0]
+ 93            klass.generator_class.TRANSFORMS[
+ 94                exp.ByteString
+ 95            ] = lambda self, e: f"{be_start}{self.sql(e, 'this')}{be_end}"
+ 96
+ 97        return klass
+ 98
+ 99
+100class Dialect(metaclass=_Dialect):
+101    index_offset = 0
+102    unnest_column_only = False
+103    alias_post_tablesample = False
+104    normalize_functions: t.Optional[str] = "upper"
+105    null_ordering = "nulls_are_small"
+106
+107    date_format = "'%Y-%m-%d'"
+108    dateint_format = "'%Y%m%d'"
+109    time_format = "'%Y-%m-%d %H:%M:%S'"
+110    time_mapping: t.Dict[str, str] = {}
+111
+112    # autofilled
+113    quote_start = None
+114    quote_end = None
+115    identifier_start = None
+116    identifier_end = None
+117
+118    time_trie = None
+119    inverse_time_mapping = None
+120    inverse_time_trie = None
+121    tokenizer_class = None
+122    parser_class = None
+123    generator_class = None
+124
+125    @classmethod
+126    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
+127        if not dialect:
+128            return cls
+129        if isinstance(dialect, _Dialect):
+130            return dialect
+131        if isinstance(dialect, Dialect):
+132            return dialect.__class__
+133
+134        result = cls.get(dialect)
+135        if not result:
+136            raise ValueError(f"Unknown dialect '{dialect}'")
+137
+138        return result
+139
+140    @classmethod
+141    def format_time(
+142        cls, expression: t.Optional[str | exp.Expression]
+143    ) -> t.Optional[exp.Expression]:
+144        if isinstance(expression, str):
+145            return exp.Literal.string(
+146                format_time(
+147                    expression[1:-1],  # the time formats are quoted
+148                    cls.time_mapping,
+149                    cls.time_trie,
+150                )
+151            )
+152        if expression and expression.is_string:
+153            return exp.Literal.string(
+154                format_time(
+155                    expression.this,
+156                    cls.time_mapping,
+157                    cls.time_trie,
+158                )
+159            )
+160        return expression
+161
+162    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
+163        return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)
+164
+165    def parse_into(
+166        self, expression_type: exp.IntoType, sql: str, **opts
+167    ) -> t.List[t.Optional[exp.Expression]]:
+168        return self.parser(**opts).parse_into(expression_type, self.tokenizer.tokenize(sql), sql)
+169
+170    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
+171        return self.generator(**opts).generate(expression)
+172
+173    def transpile(self, sql: str, **opts) -> t.List[str]:
+174        return [self.generate(expression, **opts) for expression in self.parse(sql)]
+175
+176    @property
+177    def tokenizer(self) -> Tokenizer:
+178        if not hasattr(self, "_tokenizer"):
+179            self._tokenizer = self.tokenizer_class()  # type: ignore
+180        return self._tokenizer
+181
+182    def parser(self, **opts) -> Parser:
+183        return self.parser_class(  # type: ignore
+184            **{
+185                "index_offset": self.index_offset,
+186                "unnest_column_only": self.unnest_column_only,
+187                "alias_post_tablesample": self.alias_post_tablesample,
+188                "null_ordering": self.null_ordering,
+189                **opts,
+190            },
+191        )
+192
+193    def generator(self, **opts) -> Generator:
+194        return self.generator_class(  # type: ignore
+195            **{
+196                "quote_start": self.quote_start,
+197                "quote_end": self.quote_end,
+198                "identifier_start": self.identifier_start,
+199                "identifier_end": self.identifier_end,
+200                "string_escape": self.tokenizer_class.STRING_ESCAPES[0],
+201                "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0],
+202                "index_offset": self.index_offset,
+203                "time_mapping": self.inverse_time_mapping,
+204                "time_trie": self.inverse_time_trie,
+205                "unnest_column_only": self.unnest_column_only,
+206                "alias_post_tablesample": self.alias_post_tablesample,
+207                "normalize_functions": self.normalize_functions,
+208                "null_ordering": self.null_ordering,
+209                **opts,
+210            }
+211        )
+212
+213
+214DialectType = t.Union[str, Dialect, t.Type[Dialect], None]
+215
+216
+217def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]:
+218    def _rename(self, expression):
+219        args = flatten(expression.args.values())
+220        return f"{self.normalize_func(name)}({self.format_args(*args)})"
+221
+222    return _rename
+223
+224
+225def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str:
+226    if expression.args.get("accuracy"):
+227        self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy")
+228    return f"APPROX_COUNT_DISTINCT({self.format_args(expression.this)})"
+229
+230
+231def if_sql(self: Generator, expression: exp.If) -> str:
+232    expressions = self.format_args(
+233        expression.this, expression.args.get("true"), expression.args.get("false")
+234    )
+235    return f"IF({expressions})"
+236
+237
+238def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str:
+239    return self.binary(expression, "->")
+240
+241
+242def arrow_json_extract_scalar_sql(
+243    self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar
+244) -> str:
+245    return self.binary(expression, "->>")
+246
+247
+248def inline_array_sql(self: Generator, expression: exp.Array) -> str:
+249    return f"[{self.expressions(expression)}]"
+250
+251
+252def no_ilike_sql(self: Generator, expression: exp.ILike) -> str:
+253    return self.like_sql(
+254        exp.Like(
+255            this=exp.Lower(this=expression.this),
+256            expression=expression.args["expression"],
+257        )
+258    )
+259
+260
+261def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str:
+262    zone = self.sql(expression, "this")
+263    return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE"
+264
+265
+266def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str:
+267    if expression.args.get("recursive"):
+268        self.unsupported("Recursive CTEs are unsupported")
+269        expression.args["recursive"] = False
+270    return self.with_sql(expression)
+271
+272
+273def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str:
+274    n = self.sql(expression, "this")
+275    d = self.sql(expression, "expression")
+276    return f"IF({d} <> 0, {n} / {d}, NULL)"
+277
+278
+279def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
+280    self.unsupported("TABLESAMPLE unsupported")
+281    return self.sql(expression.this)
+282
+283
+284def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str:
+285    self.unsupported("PIVOT unsupported")
+286    return self.sql(expression)
+287
+288
+289def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str:
+290    return self.cast_sql(expression)
+291
+292
+293def no_properties_sql(self: Generator, expression: exp.Properties) -> str:
+294    self.unsupported("Properties unsupported")
+295    return ""
+296
+297
+298def str_position_sql(self: Generator, expression: exp.StrPosition) -> str:
+299    this = self.sql(expression, "this")
+300    substr = self.sql(expression, "substr")
+301    position = self.sql(expression, "position")
+302    if position:
+303        return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1"
+304    return f"STRPOS({this}, {substr})"
+305
+306
+307def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
+308    this = self.sql(expression, "this")
+309    struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True))
+310    return f"{this}.{struct_key}"
+311
+312
+313def var_map_sql(
+314    self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP"
+315) -> str:
+316    keys = expression.args["keys"]
+317    values = expression.args["values"]
+318
+319    if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array):
+320        self.unsupported("Cannot convert array columns into map.")
+321        return f"{map_func_name}({self.format_args(keys, values)})"
+322
+323    args = []
+324    for key, value in zip(keys.expressions, values.expressions):
+325        args.append(self.sql(key))
+326        args.append(self.sql(value))
+327    return f"{map_func_name}({self.format_args(*args)})"
+328
+329
+330def format_time_lambda(
+331    exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None
+332) -> t.Callable[[t.Sequence], E]:
+333    """Helper used for time expressions.
+334
+335    Args:
+336        exp_class: the expression class to instantiate.
+337        dialect: target sql dialect.
+338        default: the default format, True being time.
+339
+340    Returns:
+341        A callable that can be used to return the appropriately formatted time expression.
+342    """
+343
+344    def _format_time(args: t.Sequence):
+345        return exp_class(
+346            this=seq_get(args, 0),
+347            format=Dialect[dialect].format_time(
+348                seq_get(args, 1)
+349                or (Dialect[dialect].time_format if default is True else default or None)
+350            ),
+351        )
+352
+353    return _format_time
+354
+355
+356def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str:
+357    """
+358    In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the
+359    PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding
+360    columns are removed from the create statement.
+361    """
+362    has_schema = isinstance(expression.this, exp.Schema)
+363    is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW")
+364
+365    if has_schema and is_partitionable:
+366        expression = expression.copy()
+367        prop = expression.find(exp.PartitionedByProperty)
+368        this = prop and prop.this
+369        if prop and not isinstance(this, exp.Schema):
+370            schema = expression.this
+371            columns = {v.name.upper() for v in this.expressions}
+372            partitions = [col for col in schema.expressions if col.name.upper() in columns]
+373            schema.set("expressions", [e for e in schema.expressions if e not in partitions])
+374            prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions)))
+375            expression.set("this", schema)
+376
+377    return self.create_sql(expression)
+378
+379
+380def parse_date_delta(
+381    exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None
+382) -> t.Callable[[t.Sequence], E]:
+383    def inner_func(args: t.Sequence) -> E:
+384        unit_based = len(args) == 3
+385        this = seq_get(args, 2) if unit_based else seq_get(args, 0)
+386        expression = seq_get(args, 1) if unit_based else seq_get(args, 1)
+387        unit = seq_get(args, 0) if unit_based else exp.Literal.string("DAY")
+388        unit = unit_mapping.get(unit.name.lower(), unit) if unit_mapping else unit  # type: ignore
+389        return exp_class(this=this, expression=expression, unit=unit)
+390
+391    return inner_func
+392
+393
+394def locate_to_strposition(args: t.Sequence) -> exp.Expression:
+395    return exp.StrPosition(
+396        this=seq_get(args, 1),
+397        substr=seq_get(args, 0),
+398        position=seq_get(args, 2),
+399    )
+400
+401
+402def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
+403    args = self.format_args(
+404        expression.args.get("substr"), expression.this, expression.args.get("position")
+405    )
+406    return f"LOCATE({args})"
+407
+408
+409def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str:
+410    return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
+411
+412
+413def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str:
+414    return f"CAST({self.sql(expression, 'this')} AS DATE)"
+415
+416
+417def trim_sql(self: Generator, expression: exp.Trim) -> str:
+418    target = self.sql(expression, "this")
+419    trim_type = self.sql(expression, "position")
+420    remove_chars = self.sql(expression, "expression")
+421    collation = self.sql(expression, "collation")
+422
+423    # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific
+424    if not remove_chars and not collation:
+425        return self.trim_sql(expression)
+426
+427    trim_type = f"{trim_type} " if trim_type else ""
+428    remove_chars = f"{remove_chars} " if remove_chars else ""
+429    from_part = "FROM " if trim_type or remove_chars else ""
+430    collation = f" COLLATE {collation}" if collation else ""
+431    return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})"
+
+ + +
+
+ +
+ + class + Dialects(builtins.str, enum.Enum): + + + +
+ +
18class Dialects(str, Enum):
+19    DIALECT = ""
+20
+21    BIGQUERY = "bigquery"
+22    CLICKHOUSE = "clickhouse"
+23    DUCKDB = "duckdb"
+24    HIVE = "hive"
+25    MYSQL = "mysql"
+26    ORACLE = "oracle"
+27    POSTGRES = "postgres"
+28    PRESTO = "presto"
+29    REDSHIFT = "redshift"
+30    SNOWFLAKE = "snowflake"
+31    SPARK = "spark"
+32    SQLITE = "sqlite"
+33    STARROCKS = "starrocks"
+34    TABLEAU = "tableau"
+35    TRINO = "trino"
+36    TSQL = "tsql"
+37    DATABRICKS = "databricks"
+38    DRILL = "drill"
+39    TERADATA = "teradata"
+
+ + +

An enumeration.

+
+ + +
+
+ DIALECT = <Dialects.DIALECT: ''> + + +
+ + + + +
+
+
+ BIGQUERY = <Dialects.BIGQUERY: 'bigquery'> + + +
+ + + + +
+
+
+ CLICKHOUSE = <Dialects.CLICKHOUSE: 'clickhouse'> + + +
+ + + + +
+
+
+ DUCKDB = <Dialects.DUCKDB: 'duckdb'> + + +
+ + + + +
+
+
+ HIVE = <Dialects.HIVE: 'hive'> + + +
+ + + + +
+
+
+ MYSQL = <Dialects.MYSQL: 'mysql'> + + +
+ + + + +
+
+
+ ORACLE = <Dialects.ORACLE: 'oracle'> + + +
+ + + + +
+
+
+ POSTGRES = <Dialects.POSTGRES: 'postgres'> + + +
+ + + + +
+
+
+ PRESTO = <Dialects.PRESTO: 'presto'> + + +
+ + + + +
+
+
+ REDSHIFT = <Dialects.REDSHIFT: 'redshift'> + + +
+ + + + +
+
+
+ SNOWFLAKE = <Dialects.SNOWFLAKE: 'snowflake'> + + +
+ + + + +
+
+
+ SPARK = <Dialects.SPARK: 'spark'> + + +
+ + + + +
+
+
+ SQLITE = <Dialects.SQLITE: 'sqlite'> + + +
+ + + + +
+
+
+ STARROCKS = <Dialects.STARROCKS: 'starrocks'> + + +
+ + + + +
+
+
+ TABLEAU = <Dialects.TABLEAU: 'tableau'> + + +
+ + + + +
+
+
+ TRINO = <Dialects.TRINO: 'trino'> + + +
+ + + + +
+
+
+ TSQL = <Dialects.TSQL: 'tsql'> + + +
+ + + + +
+
+
+ DATABRICKS = <Dialects.DATABRICKS: 'databricks'> + + +
+ + + + +
+
+
+ DRILL = <Dialects.DRILL: 'drill'> + + +
+ + + + +
+
+
+ TERADATA = <Dialects.TERADATA: 'teradata'> + + +
+ + + + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
builtins.str
+
encode
+
replace
+
split
+
rsplit
+
join
+
capitalize
+
casefold
+
title
+
center
+
count
+
expandtabs
+
find
+
partition
+
index
+
ljust
+
lower
+
lstrip
+
rfind
+
rindex
+
rjust
+
rstrip
+
rpartition
+
splitlines
+
strip
+
swapcase
+
translate
+
upper
+
startswith
+
endswith
+
removeprefix
+
removesuffix
+
isascii
+
islower
+
isupper
+
istitle
+
isspace
+
isdecimal
+
isdigit
+
isnumeric
+
isalpha
+
isalnum
+
isidentifier
+
isprintable
+
zfill
+
format
+
format_map
+
maketrans
+ +
+
+
+
+
+ +
+ + class + Dialect: + + + +
+ +
101class Dialect(metaclass=_Dialect):
+102    index_offset = 0
+103    unnest_column_only = False
+104    alias_post_tablesample = False
+105    normalize_functions: t.Optional[str] = "upper"
+106    null_ordering = "nulls_are_small"
+107
+108    date_format = "'%Y-%m-%d'"
+109    dateint_format = "'%Y%m%d'"
+110    time_format = "'%Y-%m-%d %H:%M:%S'"
+111    time_mapping: t.Dict[str, str] = {}
+112
+113    # autofilled
+114    quote_start = None
+115    quote_end = None
+116    identifier_start = None
+117    identifier_end = None
+118
+119    time_trie = None
+120    inverse_time_mapping = None
+121    inverse_time_trie = None
+122    tokenizer_class = None
+123    parser_class = None
+124    generator_class = None
+125
+126    @classmethod
+127    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
+128        if not dialect:
+129            return cls
+130        if isinstance(dialect, _Dialect):
+131            return dialect
+132        if isinstance(dialect, Dialect):
+133            return dialect.__class__
+134
+135        result = cls.get(dialect)
+136        if not result:
+137            raise ValueError(f"Unknown dialect '{dialect}'")
+138
+139        return result
+140
+141    @classmethod
+142    def format_time(
+143        cls, expression: t.Optional[str | exp.Expression]
+144    ) -> t.Optional[exp.Expression]:
+145        if isinstance(expression, str):
+146            return exp.Literal.string(
+147                format_time(
+148                    expression[1:-1],  # the time formats are quoted
+149                    cls.time_mapping,
+150                    cls.time_trie,
+151                )
+152            )
+153        if expression and expression.is_string:
+154            return exp.Literal.string(
+155                format_time(
+156                    expression.this,
+157                    cls.time_mapping,
+158                    cls.time_trie,
+159                )
+160            )
+161        return expression
+162
+163    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
+164        return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)
+165
+166    def parse_into(
+167        self, expression_type: exp.IntoType, sql: str, **opts
+168    ) -> t.List[t.Optional[exp.Expression]]:
+169        return self.parser(**opts).parse_into(expression_type, self.tokenizer.tokenize(sql), sql)
+170
+171    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
+172        return self.generator(**opts).generate(expression)
+173
+174    def transpile(self, sql: str, **opts) -> t.List[str]:
+175        return [self.generate(expression, **opts) for expression in self.parse(sql)]
+176
+177    @property
+178    def tokenizer(self) -> Tokenizer:
+179        if not hasattr(self, "_tokenizer"):
+180            self._tokenizer = self.tokenizer_class()  # type: ignore
+181        return self._tokenizer
+182
+183    def parser(self, **opts) -> Parser:
+184        return self.parser_class(  # type: ignore
+185            **{
+186                "index_offset": self.index_offset,
+187                "unnest_column_only": self.unnest_column_only,
+188                "alias_post_tablesample": self.alias_post_tablesample,
+189                "null_ordering": self.null_ordering,
+190                **opts,
+191            },
+192        )
+193
+194    def generator(self, **opts) -> Generator:
+195        return self.generator_class(  # type: ignore
+196            **{
+197                "quote_start": self.quote_start,
+198                "quote_end": self.quote_end,
+199                "identifier_start": self.identifier_start,
+200                "identifier_end": self.identifier_end,
+201                "string_escape": self.tokenizer_class.STRING_ESCAPES[0],
+202                "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0],
+203                "index_offset": self.index_offset,
+204                "time_mapping": self.inverse_time_mapping,
+205                "time_trie": self.inverse_time_trie,
+206                "unnest_column_only": self.unnest_column_only,
+207                "alias_post_tablesample": self.alias_post_tablesample,
+208                "normalize_functions": self.normalize_functions,
+209                "null_ordering": self.null_ordering,
+210                **opts,
+211            }
+212        )
+
+ + + + +
+
+ + Dialect() + + +
+ + + + +
+
+ +
+
@classmethod
+ + def + get_or_raise( cls, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType]) -> Type[sqlglot.dialects.dialect.Dialect]: + + + +
+ +
126    @classmethod
+127    def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]:
+128        if not dialect:
+129            return cls
+130        if isinstance(dialect, _Dialect):
+131            return dialect
+132        if isinstance(dialect, Dialect):
+133            return dialect.__class__
+134
+135        result = cls.get(dialect)
+136        if not result:
+137            raise ValueError(f"Unknown dialect '{dialect}'")
+138
+139        return result
+
+ + + + +
+
+ +
+
@classmethod
+ + def + format_time( cls, expression: Union[str, sqlglot.expressions.Expression, NoneType]) -> Optional[sqlglot.expressions.Expression]: + + + +
+ +
141    @classmethod
+142    def format_time(
+143        cls, expression: t.Optional[str | exp.Expression]
+144    ) -> t.Optional[exp.Expression]:
+145        if isinstance(expression, str):
+146            return exp.Literal.string(
+147                format_time(
+148                    expression[1:-1],  # the time formats are quoted
+149                    cls.time_mapping,
+150                    cls.time_trie,
+151                )
+152            )
+153        if expression and expression.is_string:
+154            return exp.Literal.string(
+155                format_time(
+156                    expression.this,
+157                    cls.time_mapping,
+158                    cls.time_trie,
+159                )
+160            )
+161        return expression
+
+ + + + +
+
+ +
+ + def + parse(self, sql: str, **opts) -> List[Optional[sqlglot.expressions.Expression]]: + + + +
+ +
163    def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
+164        return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql)
+
+ + + + +
+
+ +
+ + def + parse_into( self, expression_type: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]], sql: str, **opts) -> List[Optional[sqlglot.expressions.Expression]]: + + + +
+ +
166    def parse_into(
+167        self, expression_type: exp.IntoType, sql: str, **opts
+168    ) -> t.List[t.Optional[exp.Expression]]:
+169        return self.parser(**opts).parse_into(expression_type, self.tokenizer.tokenize(sql), sql)
+
+ + + + +
+
+ +
+ + def + generate( self, expression: Optional[sqlglot.expressions.Expression], **opts) -> str: + + + +
+ +
171    def generate(self, expression: t.Optional[exp.Expression], **opts) -> str:
+172        return self.generator(**opts).generate(expression)
+
+ + + + +
+
+ +
+ + def + transpile(self, sql: str, **opts) -> List[str]: + + + +
+ +
174    def transpile(self, sql: str, **opts) -> t.List[str]:
+175        return [self.generate(expression, **opts) for expression in self.parse(sql)]
+
+ + + + +
+
+ +
+ + def + parser(self, **opts) -> sqlglot.parser.Parser: + + + +
+ +
183    def parser(self, **opts) -> Parser:
+184        return self.parser_class(  # type: ignore
+185            **{
+186                "index_offset": self.index_offset,
+187                "unnest_column_only": self.unnest_column_only,
+188                "alias_post_tablesample": self.alias_post_tablesample,
+189                "null_ordering": self.null_ordering,
+190                **opts,
+191            },
+192        )
+
+ + + + +
+
+ +
+ + def + generator(self, **opts) -> sqlglot.generator.Generator: + + + +
+ +
194    def generator(self, **opts) -> Generator:
+195        return self.generator_class(  # type: ignore
+196            **{
+197                "quote_start": self.quote_start,
+198                "quote_end": self.quote_end,
+199                "identifier_start": self.identifier_start,
+200                "identifier_end": self.identifier_end,
+201                "string_escape": self.tokenizer_class.STRING_ESCAPES[0],
+202                "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0],
+203                "index_offset": self.index_offset,
+204                "time_mapping": self.inverse_time_mapping,
+205                "time_trie": self.inverse_time_trie,
+206                "unnest_column_only": self.unnest_column_only,
+207                "alias_post_tablesample": self.alias_post_tablesample,
+208                "normalize_functions": self.normalize_functions,
+209                "null_ordering": self.null_ordering,
+210                **opts,
+211            }
+212        )
+
+ + + + +
+
+
+ +
+ + def + rename_func( name: str) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]: + + + +
+ +
218def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]:
+219    def _rename(self, expression):
+220        args = flatten(expression.args.values())
+221        return f"{self.normalize_func(name)}({self.format_args(*args)})"
+222
+223    return _rename
+
+ + + + +
+
+ +
+ + def + approx_count_distinct_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.ApproxDistinct) -> str: + + + +
+ +
226def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str:
+227    if expression.args.get("accuracy"):
+228        self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy")
+229    return f"APPROX_COUNT_DISTINCT({self.format_args(expression.this)})"
+
+ + + + +
+
+ +
+ + def + if_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.If) -> str: + + + +
+ +
232def if_sql(self: Generator, expression: exp.If) -> str:
+233    expressions = self.format_args(
+234        expression.this, expression.args.get("true"), expression.args.get("false")
+235    )
+236    return f"IF({expressions})"
+
+ + + + +
+
+ +
+ + def + arrow_json_extract_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.JSONExtract | sqlglot.expressions.JSONBExtract) -> str: + + + +
+ +
239def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str:
+240    return self.binary(expression, "->")
+
+ + + + +
+
+ +
+ + def + arrow_json_extract_scalar_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.JSONExtractScalar | sqlglot.expressions.JSONBExtractScalar) -> str: + + + +
+ +
243def arrow_json_extract_scalar_sql(
+244    self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar
+245) -> str:
+246    return self.binary(expression, "->>")
+
+ + + + +
+
+ +
+ + def + inline_array_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Array) -> str: + + + +
+ +
249def inline_array_sql(self: Generator, expression: exp.Array) -> str:
+250    return f"[{self.expressions(expression)}]"
+
+ + + + +
+
+ +
+ + def + no_ilike_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.ILike) -> str: + + + +
+ +
253def no_ilike_sql(self: Generator, expression: exp.ILike) -> str:
+254    return self.like_sql(
+255        exp.Like(
+256            this=exp.Lower(this=expression.this),
+257            expression=expression.args["expression"],
+258        )
+259    )
+
+ + + + +
+
+ +
+ + def + no_paren_current_date_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.CurrentDate) -> str: + + + +
+ +
262def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str:
+263    zone = self.sql(expression, "this")
+264    return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE"
+
+ + + + +
+
+ +
+ + def + no_recursive_cte_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.With) -> str: + + + +
+ +
267def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str:
+268    if expression.args.get("recursive"):
+269        self.unsupported("Recursive CTEs are unsupported")
+270        expression.args["recursive"] = False
+271    return self.with_sql(expression)
+
+ + + + +
+
+ +
+ + def + no_safe_divide_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.SafeDivide) -> str: + + + +
+ +
274def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str:
+275    n = self.sql(expression, "this")
+276    d = self.sql(expression, "expression")
+277    return f"IF({d} <> 0, {n} / {d}, NULL)"
+
+ + + + +
+
+ +
+ + def + no_tablesample_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.TableSample) -> str: + + + +
+ +
280def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
+281    self.unsupported("TABLESAMPLE unsupported")
+282    return self.sql(expression.this)
+
+ + + + +
+
+ +
+ + def + no_pivot_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Pivot) -> str: + + + +
+ +
285def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str:
+286    self.unsupported("PIVOT unsupported")
+287    return self.sql(expression)
+
+ + + + +
+
+ +
+ + def + no_trycast_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.TryCast) -> str: + + + +
+ +
290def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str:
+291    return self.cast_sql(expression)
+
+ + + + +
+
+ +
+ + def + no_properties_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Properties) -> str: + + + +
+ +
294def no_properties_sql(self: Generator, expression: exp.Properties) -> str:
+295    self.unsupported("Properties unsupported")
+296    return ""
+
+ + + + +
+
+ +
+ + def + str_position_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.StrPosition) -> str: + + + +
+ +
299def str_position_sql(self: Generator, expression: exp.StrPosition) -> str:
+300    this = self.sql(expression, "this")
+301    substr = self.sql(expression, "substr")
+302    position = self.sql(expression, "position")
+303    if position:
+304        return f"STRPOS(SUBSTR({this}, {position}), {substr}) + {position} - 1"
+305    return f"STRPOS({this}, {substr})"
+
+ + + + +
+
+ +
+ + def + struct_extract_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.StructExtract) -> str: + + + +
+ +
308def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
+309    this = self.sql(expression, "this")
+310    struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True))
+311    return f"{this}.{struct_key}"
+
+ + + + +
+
+ +
+ + def + var_map_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Map | sqlglot.expressions.VarMap, map_func_name: str = 'MAP') -> str: + + + +
+ +
314def var_map_sql(
+315    self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP"
+316) -> str:
+317    keys = expression.args["keys"]
+318    values = expression.args["values"]
+319
+320    if not isinstance(keys, exp.Array) or not isinstance(values, exp.Array):
+321        self.unsupported("Cannot convert array columns into map.")
+322        return f"{map_func_name}({self.format_args(keys, values)})"
+323
+324    args = []
+325    for key, value in zip(keys.expressions, values.expressions):
+326        args.append(self.sql(key))
+327        args.append(self.sql(value))
+328    return f"{map_func_name}({self.format_args(*args)})"
+
+ + + + +
+
+ +
+ + def + format_time_lambda( exp_class: Type[~E], dialect: str, default: Union[bool, str, NoneType] = None) -> Callable[[Sequence], ~E]: + + + +
+ +
331def format_time_lambda(
+332    exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None
+333) -> t.Callable[[t.Sequence], E]:
+334    """Helper used for time expressions.
+335
+336    Args:
+337        exp_class: the expression class to instantiate.
+338        dialect: target sql dialect.
+339        default: the default format, True being time.
+340
+341    Returns:
+342        A callable that can be used to return the appropriately formatted time expression.
+343    """
+344
+345    def _format_time(args: t.Sequence):
+346        return exp_class(
+347            this=seq_get(args, 0),
+348            format=Dialect[dialect].format_time(
+349                seq_get(args, 1)
+350                or (Dialect[dialect].time_format if default is True else default or None)
+351            ),
+352        )
+353
+354    return _format_time
+
+ + +

Helper used for time expressions.

+ +
Arguments:
+ +
    +
  • exp_class: the expression class to instantiate.
  • +
  • dialect: target sql dialect.
  • +
  • default: the default format, True being time.
  • +
+ +
Returns:
+ +
+

A callable that can be used to return the appropriately formatted time expression.

+
+
+ + +
+
+ +
+ + def + create_with_partitions_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Create) -> str: + + + +
+ +
357def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str:
+358    """
+359    In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the
+360    PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding
+361    columns are removed from the create statement.
+362    """
+363    has_schema = isinstance(expression.this, exp.Schema)
+364    is_partitionable = expression.args.get("kind") in ("TABLE", "VIEW")
+365
+366    if has_schema and is_partitionable:
+367        expression = expression.copy()
+368        prop = expression.find(exp.PartitionedByProperty)
+369        this = prop and prop.this
+370        if prop and not isinstance(this, exp.Schema):
+371            schema = expression.this
+372            columns = {v.name.upper() for v in this.expressions}
+373            partitions = [col for col in schema.expressions if col.name.upper() in columns]
+374            schema.set("expressions", [e for e in schema.expressions if e not in partitions])
+375            prop.replace(exp.PartitionedByProperty(this=exp.Schema(expressions=partitions)))
+376            expression.set("this", schema)
+377
+378    return self.create_sql(expression)
+
+ + +

In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the +PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding +columns are removed from the create statement.

+
+ + +
+
+ +
+ + def + parse_date_delta( exp_class: Type[~E], unit_mapping: Optional[Dict[str, str]] = None) -> Callable[[Sequence], ~E]: + + + +
+ +
381def parse_date_delta(
+382    exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None
+383) -> t.Callable[[t.Sequence], E]:
+384    def inner_func(args: t.Sequence) -> E:
+385        unit_based = len(args) == 3
+386        this = seq_get(args, 2) if unit_based else seq_get(args, 0)
+387        expression = seq_get(args, 1) if unit_based else seq_get(args, 1)
+388        unit = seq_get(args, 0) if unit_based else exp.Literal.string("DAY")
+389        unit = unit_mapping.get(unit.name.lower(), unit) if unit_mapping else unit  # type: ignore
+390        return exp_class(this=this, expression=expression, unit=unit)
+391
+392    return inner_func
+
+ + + + +
+
+ +
+ + def + locate_to_strposition(args: Sequence) -> sqlglot.expressions.Expression: + + + +
+ +
395def locate_to_strposition(args: t.Sequence) -> exp.Expression:
+396    return exp.StrPosition(
+397        this=seq_get(args, 1),
+398        substr=seq_get(args, 0),
+399        position=seq_get(args, 2),
+400    )
+
+ + + + +
+
+ +
+ + def + strposition_to_locate_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.StrPosition) -> str: + + + +
+ +
403def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
+404    args = self.format_args(
+405        expression.args.get("substr"), expression.this, expression.args.get("position")
+406    )
+407    return f"LOCATE({args})"
+
+ + + + +
+
+ +
+ + def + timestrtotime_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.TimeStrToTime) -> str: + + + +
+ +
410def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str:
+411    return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
+
+ + + + +
+
+ +
+ + def + datestrtodate_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.DateStrToDate) -> str: + + + +
+ +
414def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str:
+415    return f"CAST({self.sql(expression, 'this')} AS DATE)"
+
+ + + + +
+
+ +
+ + def + trim_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.Trim) -> str: + + + +
+ +
418def trim_sql(self: Generator, expression: exp.Trim) -> str:
+419    target = self.sql(expression, "this")
+420    trim_type = self.sql(expression, "position")
+421    remove_chars = self.sql(expression, "expression")
+422    collation = self.sql(expression, "collation")
+423
+424    # Use TRIM/LTRIM/RTRIM syntax if the expression isn't database-specific
+425    if not remove_chars and not collation:
+426        return self.trim_sql(expression)
+427
+428    trim_type = f"{trim_type} " if trim_type else ""
+429    remove_chars = f"{remove_chars} " if remove_chars else ""
+430    from_part = "FROM " if trim_type or remove_chars else ""
+431    collation = f" COLLATE {collation}" if collation else ""
+432    return f"TRIM({trim_type}{remove_chars}{from_part}{target}{collation})"
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/drill.html b/docs/sqlglot/dialects/drill.html new file mode 100644 index 0000000..0cea591 --- /dev/null +++ b/docs/sqlglot/dialects/drill.html @@ -0,0 +1,1088 @@ + + + + + + + sqlglot.dialects.drill API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.drill

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import re
+  4import typing as t
+  5
+  6from sqlglot import exp, generator, parser, tokens
+  7from sqlglot.dialects.dialect import (
+  8    Dialect,
+  9    create_with_partitions_sql,
+ 10    datestrtodate_sql,
+ 11    format_time_lambda,
+ 12    no_pivot_sql,
+ 13    no_trycast_sql,
+ 14    rename_func,
+ 15    str_position_sql,
+ 16    timestrtotime_sql,
+ 17)
+ 18
+ 19
+ 20def _str_to_time_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
+ 21    return f"STRPTIME({self.sql(expression, 'this')}, {self.format_time(expression)})"
+ 22
+ 23
+ 24def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
+ 25    time_format = self.format_time(expression)
+ 26    if time_format and time_format not in (Drill.time_format, Drill.date_format):
+ 27        return f"CAST({_str_to_time_sql(self, expression)} AS DATE)"
+ 28    return f"CAST({self.sql(expression, 'this')} AS DATE)"
+ 29
+ 30
+ 31def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]:
+ 32    def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:
+ 33        this = self.sql(expression, "this")
+ 34        unit = exp.Var(this=expression.text("unit").upper() or "DAY")
+ 35        return (
+ 36            f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})"
+ 37        )
+ 38
+ 39    return func
+ 40
+ 41
+ 42def if_sql(self: generator.Generator, expression: exp.If) -> str:
+ 43    """
+ 44    Drill requires backticks around certain SQL reserved words, IF being one of them,  This function
+ 45    adds the backticks around the keyword IF.
+ 46    Args:
+ 47        self: The Drill dialect
+ 48        expression: The input IF expression
+ 49
+ 50    Returns:  The expression with IF in backticks.
+ 51
+ 52    """
+ 53    expressions = self.format_args(
+ 54        expression.this, expression.args.get("true"), expression.args.get("false")
+ 55    )
+ 56    return f"`IF`({expressions})"
+ 57
+ 58
+ 59def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str:
+ 60    this = self.sql(expression, "this")
+ 61    time_format = self.format_time(expression)
+ 62    if time_format == Drill.date_format:
+ 63        return f"CAST({this} AS DATE)"
+ 64    return f"TO_DATE({this}, {time_format})"
+ 65
+ 66
+ 67class Drill(Dialect):
+ 68    normalize_functions = None
+ 69    null_ordering = "nulls_are_last"
+ 70    date_format = "'yyyy-MM-dd'"
+ 71    dateint_format = "'yyyyMMdd'"
+ 72    time_format = "'yyyy-MM-dd HH:mm:ss'"
+ 73
+ 74    time_mapping = {
+ 75        "y": "%Y",
+ 76        "Y": "%Y",
+ 77        "YYYY": "%Y",
+ 78        "yyyy": "%Y",
+ 79        "YY": "%y",
+ 80        "yy": "%y",
+ 81        "MMMM": "%B",
+ 82        "MMM": "%b",
+ 83        "MM": "%m",
+ 84        "M": "%-m",
+ 85        "dd": "%d",
+ 86        "d": "%-d",
+ 87        "HH": "%H",
+ 88        "H": "%-H",
+ 89        "hh": "%I",
+ 90        "h": "%-I",
+ 91        "mm": "%M",
+ 92        "m": "%-M",
+ 93        "ss": "%S",
+ 94        "s": "%-S",
+ 95        "SSSSSS": "%f",
+ 96        "a": "%p",
+ 97        "DD": "%j",
+ 98        "D": "%-j",
+ 99        "E": "%a",
+100        "EE": "%a",
+101        "EEE": "%a",
+102        "EEEE": "%A",
+103        "''T''": "T",
+104    }
+105
+106    class Tokenizer(tokens.Tokenizer):
+107        QUOTES = ["'"]
+108        IDENTIFIERS = ["`"]
+109        STRING_ESCAPES = ["\\"]
+110        ENCODE = "utf-8"
+111
+112    class Parser(parser.Parser):
+113        STRICT_CAST = False
+114
+115        FUNCTIONS = {
+116            **parser.Parser.FUNCTIONS,  # type: ignore
+117            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
+118            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
+119        }
+120
+121    class Generator(generator.Generator):
+122        TYPE_MAPPING = {
+123            **generator.Generator.TYPE_MAPPING,  # type: ignore
+124            exp.DataType.Type.INT: "INTEGER",
+125            exp.DataType.Type.SMALLINT: "INTEGER",
+126            exp.DataType.Type.TINYINT: "INTEGER",
+127            exp.DataType.Type.BINARY: "VARBINARY",
+128            exp.DataType.Type.TEXT: "VARCHAR",
+129            exp.DataType.Type.NCHAR: "VARCHAR",
+130            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+131            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+132            exp.DataType.Type.DATETIME: "TIMESTAMP",
+133        }
+134
+135        PROPERTIES_LOCATION = {
+136            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+137            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+138        }
+139
+140        TRANSFORMS = {
+141            **generator.Generator.TRANSFORMS,  # type: ignore
+142            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+143            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
+144            exp.ArraySize: rename_func("REPEATED_COUNT"),
+145            exp.Create: create_with_partitions_sql,
+146            exp.DateAdd: _date_add_sql("ADD"),
+147            exp.DateStrToDate: datestrtodate_sql,
+148            exp.DateSub: _date_add_sql("SUB"),
+149            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)",
+150            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})",
+151            exp.If: if_sql,
+152            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
+153            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+154            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+155            exp.Pivot: no_pivot_sql,
+156            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+157            exp.StrPosition: str_position_sql,
+158            exp.StrToDate: _str_to_date,
+159            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+160            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+161            exp.TimeStrToTime: timestrtotime_sql,
+162            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+163            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+164            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+165            exp.TryCast: no_trycast_sql,
+166            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
+167            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+168            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+169        }
+170
+171        def normalize_func(self, name: str) -> str:
+172            return name if re.match(exp.SAFE_IDENTIFIER_RE, name) else f"`{name}`"
+
+ + +
+
+ +
+ + def + if_sql( self: sqlglot.generator.Generator, expression: sqlglot.expressions.If) -> str: + + + +
+ +
43def if_sql(self: generator.Generator, expression: exp.If) -> str:
+44    """
+45    Drill requires backticks around certain SQL reserved words, IF being one of them,  This function
+46    adds the backticks around the keyword IF.
+47    Args:
+48        self: The Drill dialect
+49        expression: The input IF expression
+50
+51    Returns:  The expression with IF in backticks.
+52
+53    """
+54    expressions = self.format_args(
+55        expression.this, expression.args.get("true"), expression.args.get("false")
+56    )
+57    return f"`IF`({expressions})"
+
+ + +

Drill requires backticks around certain SQL reserved words, IF being one of them, This function +adds the backticks around the keyword IF.

+ +
Arguments:
+ +
    +
  • self: The Drill dialect
  • +
  • expression: The input IF expression
  • +
+ +

Returns: The expression with IF in backticks.

+
+ + +
+
+ +
+ + class + Drill(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 68class Drill(Dialect):
+ 69    normalize_functions = None
+ 70    null_ordering = "nulls_are_last"
+ 71    date_format = "'yyyy-MM-dd'"
+ 72    dateint_format = "'yyyyMMdd'"
+ 73    time_format = "'yyyy-MM-dd HH:mm:ss'"
+ 74
+ 75    time_mapping = {
+ 76        "y": "%Y",
+ 77        "Y": "%Y",
+ 78        "YYYY": "%Y",
+ 79        "yyyy": "%Y",
+ 80        "YY": "%y",
+ 81        "yy": "%y",
+ 82        "MMMM": "%B",
+ 83        "MMM": "%b",
+ 84        "MM": "%m",
+ 85        "M": "%-m",
+ 86        "dd": "%d",
+ 87        "d": "%-d",
+ 88        "HH": "%H",
+ 89        "H": "%-H",
+ 90        "hh": "%I",
+ 91        "h": "%-I",
+ 92        "mm": "%M",
+ 93        "m": "%-M",
+ 94        "ss": "%S",
+ 95        "s": "%-S",
+ 96        "SSSSSS": "%f",
+ 97        "a": "%p",
+ 98        "DD": "%j",
+ 99        "D": "%-j",
+100        "E": "%a",
+101        "EE": "%a",
+102        "EEE": "%a",
+103        "EEEE": "%A",
+104        "''T''": "T",
+105    }
+106
+107    class Tokenizer(tokens.Tokenizer):
+108        QUOTES = ["'"]
+109        IDENTIFIERS = ["`"]
+110        STRING_ESCAPES = ["\\"]
+111        ENCODE = "utf-8"
+112
+113    class Parser(parser.Parser):
+114        STRICT_CAST = False
+115
+116        FUNCTIONS = {
+117            **parser.Parser.FUNCTIONS,  # type: ignore
+118            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
+119            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
+120        }
+121
+122    class Generator(generator.Generator):
+123        TYPE_MAPPING = {
+124            **generator.Generator.TYPE_MAPPING,  # type: ignore
+125            exp.DataType.Type.INT: "INTEGER",
+126            exp.DataType.Type.SMALLINT: "INTEGER",
+127            exp.DataType.Type.TINYINT: "INTEGER",
+128            exp.DataType.Type.BINARY: "VARBINARY",
+129            exp.DataType.Type.TEXT: "VARCHAR",
+130            exp.DataType.Type.NCHAR: "VARCHAR",
+131            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+132            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+133            exp.DataType.Type.DATETIME: "TIMESTAMP",
+134        }
+135
+136        PROPERTIES_LOCATION = {
+137            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+138            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+139        }
+140
+141        TRANSFORMS = {
+142            **generator.Generator.TRANSFORMS,  # type: ignore
+143            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+144            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
+145            exp.ArraySize: rename_func("REPEATED_COUNT"),
+146            exp.Create: create_with_partitions_sql,
+147            exp.DateAdd: _date_add_sql("ADD"),
+148            exp.DateStrToDate: datestrtodate_sql,
+149            exp.DateSub: _date_add_sql("SUB"),
+150            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)",
+151            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})",
+152            exp.If: if_sql,
+153            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
+154            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+155            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+156            exp.Pivot: no_pivot_sql,
+157            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+158            exp.StrPosition: str_position_sql,
+159            exp.StrToDate: _str_to_date,
+160            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+161            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+162            exp.TimeStrToTime: timestrtotime_sql,
+163            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+164            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+165            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+166            exp.TryCast: no_trycast_sql,
+167            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
+168            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+169            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+170        }
+171
+172        def normalize_func(self, name: str) -> str:
+173            return name if re.match(exp.SAFE_IDENTIFIER_RE, name) else f"`{name}`"
+
+ + + + +
+
+ + Drill() + + +
+ + + + +
+ +
+
+ +
+ + class + Drill.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
107    class Tokenizer(tokens.Tokenizer):
+108        QUOTES = ["'"]
+109        IDENTIFIERS = ["`"]
+110        STRING_ESCAPES = ["\\"]
+111        ENCODE = "utf-8"
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Drill.Parser(sqlglot.parser.Parser): + + + +
+ +
113    class Parser(parser.Parser):
+114        STRICT_CAST = False
+115
+116        FUNCTIONS = {
+117            **parser.Parser.FUNCTIONS,  # type: ignore
+118            "TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
+119            "TO_CHAR": format_time_lambda(exp.TimeToStr, "drill"),
+120        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Drill.Generator(sqlglot.generator.Generator): + + + +
+ +
122    class Generator(generator.Generator):
+123        TYPE_MAPPING = {
+124            **generator.Generator.TYPE_MAPPING,  # type: ignore
+125            exp.DataType.Type.INT: "INTEGER",
+126            exp.DataType.Type.SMALLINT: "INTEGER",
+127            exp.DataType.Type.TINYINT: "INTEGER",
+128            exp.DataType.Type.BINARY: "VARBINARY",
+129            exp.DataType.Type.TEXT: "VARCHAR",
+130            exp.DataType.Type.NCHAR: "VARCHAR",
+131            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+132            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+133            exp.DataType.Type.DATETIME: "TIMESTAMP",
+134        }
+135
+136        PROPERTIES_LOCATION = {
+137            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+138            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+139        }
+140
+141        TRANSFORMS = {
+142            **generator.Generator.TRANSFORMS,  # type: ignore
+143            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+144            exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
+145            exp.ArraySize: rename_func("REPEATED_COUNT"),
+146            exp.Create: create_with_partitions_sql,
+147            exp.DateAdd: _date_add_sql("ADD"),
+148            exp.DateStrToDate: datestrtodate_sql,
+149            exp.DateSub: _date_add_sql("SUB"),
+150            exp.DateToDi: lambda self, e: f"CAST(TO_DATE({self.sql(e, 'this')}, {Drill.dateint_format}) AS INT)",
+151            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS VARCHAR), {Drill.dateint_format})",
+152            exp.If: if_sql,
+153            exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
+154            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+155            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+156            exp.Pivot: no_pivot_sql,
+157            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+158            exp.StrPosition: str_position_sql,
+159            exp.StrToDate: _str_to_date,
+160            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+161            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+162            exp.TimeStrToTime: timestrtotime_sql,
+163            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+164            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+165            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+166            exp.TryCast: no_trycast_sql,
+167            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
+168            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+169            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+170        }
+171
+172        def normalize_func(self, name: str) -> str:
+173            return name if re.match(exp.SAFE_IDENTIFIER_RE, name) else f"`{name}`"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + normalize_func(self, name: str) -> str: + + + +
+ +
172        def normalize_func(self, name: str) -> str:
+173            return name if re.match(exp.SAFE_IDENTIFIER_RE, name) else f"`{name}`"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/duckdb.html b/docs/sqlglot/dialects/duckdb.html new file mode 100644 index 0000000..8aa1a1b --- /dev/null +++ b/docs/sqlglot/dialects/duckdb.html @@ -0,0 +1,1028 @@ + + + + + + + sqlglot.dialects.duckdb API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.duckdb

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    approx_count_distinct_sql,
+  7    arrow_json_extract_scalar_sql,
+  8    arrow_json_extract_sql,
+  9    datestrtodate_sql,
+ 10    format_time_lambda,
+ 11    no_pivot_sql,
+ 12    no_properties_sql,
+ 13    no_safe_divide_sql,
+ 14    no_tablesample_sql,
+ 15    rename_func,
+ 16    str_position_sql,
+ 17    timestrtotime_sql,
+ 18)
+ 19from sqlglot.helper import seq_get
+ 20from sqlglot.tokens import TokenType
+ 21
+ 22
+ 23def _str_to_time_sql(self, expression):
+ 24    return f"STRPTIME({self.sql(expression, 'this')}, {self.format_time(expression)})"
+ 25
+ 26
+ 27def _ts_or_ds_add(self, expression):
+ 28    this = expression.args.get("this")
+ 29    unit = self.sql(expression, "unit").strip("'") or "DAY"
+ 30    return f"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
+ 31
+ 32
+ 33def _ts_or_ds_to_date_sql(self, expression):
+ 34    time_format = self.format_time(expression)
+ 35    if time_format and time_format not in (DuckDB.time_format, DuckDB.date_format):
+ 36        return f"CAST({_str_to_time_sql(self, expression)} AS DATE)"
+ 37    return f"CAST({self.sql(expression, 'this')} AS DATE)"
+ 38
+ 39
+ 40def _date_add(self, expression):
+ 41    this = self.sql(expression, "this")
+ 42    unit = self.sql(expression, "unit").strip("'") or "DAY"
+ 43    return f"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
+ 44
+ 45
+ 46def _array_sort_sql(self, expression):
+ 47    if expression.expression:
+ 48        self.unsupported("DUCKDB ARRAY_SORT does not support a comparator")
+ 49    return f"ARRAY_SORT({self.sql(expression, 'this')})"
+ 50
+ 51
+ 52def _sort_array_sql(self, expression):
+ 53    this = self.sql(expression, "this")
+ 54    if expression.args.get("asc") == exp.false():
+ 55        return f"ARRAY_REVERSE_SORT({this})"
+ 56    return f"ARRAY_SORT({this})"
+ 57
+ 58
+ 59def _sort_array_reverse(args):
+ 60    return exp.SortArray(this=seq_get(args, 0), asc=exp.false())
+ 61
+ 62
+ 63def _struct_sql(self, expression):
+ 64    args = [
+ 65        f"'{e.name or e.this.name}': {self.sql(e, 'expression')}" for e in expression.expressions
+ 66    ]
+ 67    return f"{{{', '.join(args)}}}"
+ 68
+ 69
+ 70def _datatype_sql(self, expression):
+ 71    if expression.this == exp.DataType.Type.ARRAY:
+ 72        return f"{self.expressions(expression, flat=True)}[]"
+ 73    return self.datatype_sql(expression)
+ 74
+ 75
+ 76class DuckDB(Dialect):
+ 77    class Tokenizer(tokens.Tokenizer):
+ 78        KEYWORDS = {
+ 79            **tokens.Tokenizer.KEYWORDS,
+ 80            ":=": TokenType.EQ,
+ 81            "CHARACTER VARYING": TokenType.VARCHAR,
+ 82        }
+ 83
+ 84    class Parser(parser.Parser):
+ 85        FUNCTIONS = {
+ 86            **parser.Parser.FUNCTIONS,  # type: ignore
+ 87            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+ 88            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
+ 89            "ARRAY_SORT": exp.SortArray.from_arg_list,
+ 90            "ARRAY_REVERSE_SORT": _sort_array_reverse,
+ 91            "EPOCH": exp.TimeToUnix.from_arg_list,
+ 92            "EPOCH_MS": lambda args: exp.UnixToTime(
+ 93                this=exp.Div(
+ 94                    this=seq_get(args, 0),
+ 95                    expression=exp.Literal.number(1000),
+ 96                )
+ 97            ),
+ 98            "LIST_SORT": exp.SortArray.from_arg_list,
+ 99            "LIST_REVERSE_SORT": _sort_array_reverse,
+100            "LIST_VALUE": exp.Array.from_arg_list,
+101            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
+102            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
+103            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
+104            "STR_SPLIT": exp.Split.from_arg_list,
+105            "STRING_SPLIT": exp.Split.from_arg_list,
+106            "STRING_TO_ARRAY": exp.Split.from_arg_list,
+107            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+108            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+109            "STRUCT_PACK": exp.Struct.from_arg_list,
+110            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
+111            "UNNEST": exp.Explode.from_arg_list,
+112        }
+113
+114    class Generator(generator.Generator):
+115        STRUCT_DELIMITER = ("(", ")")
+116
+117        TRANSFORMS = {
+118            **generator.Generator.TRANSFORMS,  # type: ignore
+119            exp.ApproxDistinct: approx_count_distinct_sql,
+120            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+121            if isinstance(seq_get(e.expressions, 0), exp.Select)
+122            else rename_func("LIST_VALUE")(self, e),
+123            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+124            exp.ArraySort: _array_sort_sql,
+125            exp.ArraySum: rename_func("LIST_SUM"),
+126            exp.DataType: _datatype_sql,
+127            exp.DateAdd: _date_add,
+128            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.format_args(e.args.get("unit") or "'day'", e.expression, e.this)})""",
+129            exp.DateStrToDate: datestrtodate_sql,
+130            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
+131            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
+132            exp.Explode: rename_func("UNNEST"),
+133            exp.JSONExtract: arrow_json_extract_sql,
+134            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+135            exp.JSONBExtract: arrow_json_extract_sql,
+136            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+137            exp.LogicalOr: rename_func("BOOL_OR"),
+138            exp.Pivot: no_pivot_sql,
+139            exp.Properties: no_properties_sql,
+140            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+141            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
+142            exp.SafeDivide: no_safe_divide_sql,
+143            exp.Split: rename_func("STR_SPLIT"),
+144            exp.SortArray: _sort_array_sql,
+145            exp.StrPosition: str_position_sql,
+146            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+147            exp.StrToTime: _str_to_time_sql,
+148            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
+149            exp.Struct: _struct_sql,
+150            exp.TableSample: no_tablesample_sql,
+151            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+152            exp.TimeStrToTime: timestrtotime_sql,
+153            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
+154            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+155            exp.TimeToUnix: rename_func("EPOCH"),
+156            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
+157            exp.TsOrDsAdd: _ts_or_ds_add,
+158            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+159            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
+160            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
+161            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
+162        }
+163
+164        TYPE_MAPPING = {
+165            **generator.Generator.TYPE_MAPPING,  # type: ignore
+166            exp.DataType.Type.VARCHAR: "TEXT",
+167            exp.DataType.Type.NVARCHAR: "TEXT",
+168        }
+
+ + +
+
+ +
+ + class + DuckDB(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 77class DuckDB(Dialect):
+ 78    class Tokenizer(tokens.Tokenizer):
+ 79        KEYWORDS = {
+ 80            **tokens.Tokenizer.KEYWORDS,
+ 81            ":=": TokenType.EQ,
+ 82            "CHARACTER VARYING": TokenType.VARCHAR,
+ 83        }
+ 84
+ 85    class Parser(parser.Parser):
+ 86        FUNCTIONS = {
+ 87            **parser.Parser.FUNCTIONS,  # type: ignore
+ 88            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+ 89            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
+ 90            "ARRAY_SORT": exp.SortArray.from_arg_list,
+ 91            "ARRAY_REVERSE_SORT": _sort_array_reverse,
+ 92            "EPOCH": exp.TimeToUnix.from_arg_list,
+ 93            "EPOCH_MS": lambda args: exp.UnixToTime(
+ 94                this=exp.Div(
+ 95                    this=seq_get(args, 0),
+ 96                    expression=exp.Literal.number(1000),
+ 97                )
+ 98            ),
+ 99            "LIST_SORT": exp.SortArray.from_arg_list,
+100            "LIST_REVERSE_SORT": _sort_array_reverse,
+101            "LIST_VALUE": exp.Array.from_arg_list,
+102            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
+103            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
+104            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
+105            "STR_SPLIT": exp.Split.from_arg_list,
+106            "STRING_SPLIT": exp.Split.from_arg_list,
+107            "STRING_TO_ARRAY": exp.Split.from_arg_list,
+108            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+109            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+110            "STRUCT_PACK": exp.Struct.from_arg_list,
+111            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
+112            "UNNEST": exp.Explode.from_arg_list,
+113        }
+114
+115    class Generator(generator.Generator):
+116        STRUCT_DELIMITER = ("(", ")")
+117
+118        TRANSFORMS = {
+119            **generator.Generator.TRANSFORMS,  # type: ignore
+120            exp.ApproxDistinct: approx_count_distinct_sql,
+121            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+122            if isinstance(seq_get(e.expressions, 0), exp.Select)
+123            else rename_func("LIST_VALUE")(self, e),
+124            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+125            exp.ArraySort: _array_sort_sql,
+126            exp.ArraySum: rename_func("LIST_SUM"),
+127            exp.DataType: _datatype_sql,
+128            exp.DateAdd: _date_add,
+129            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.format_args(e.args.get("unit") or "'day'", e.expression, e.this)})""",
+130            exp.DateStrToDate: datestrtodate_sql,
+131            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
+132            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
+133            exp.Explode: rename_func("UNNEST"),
+134            exp.JSONExtract: arrow_json_extract_sql,
+135            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+136            exp.JSONBExtract: arrow_json_extract_sql,
+137            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+138            exp.LogicalOr: rename_func("BOOL_OR"),
+139            exp.Pivot: no_pivot_sql,
+140            exp.Properties: no_properties_sql,
+141            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+142            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
+143            exp.SafeDivide: no_safe_divide_sql,
+144            exp.Split: rename_func("STR_SPLIT"),
+145            exp.SortArray: _sort_array_sql,
+146            exp.StrPosition: str_position_sql,
+147            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+148            exp.StrToTime: _str_to_time_sql,
+149            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
+150            exp.Struct: _struct_sql,
+151            exp.TableSample: no_tablesample_sql,
+152            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+153            exp.TimeStrToTime: timestrtotime_sql,
+154            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
+155            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+156            exp.TimeToUnix: rename_func("EPOCH"),
+157            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
+158            exp.TsOrDsAdd: _ts_or_ds_add,
+159            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+160            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
+161            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
+162            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
+163        }
+164
+165        TYPE_MAPPING = {
+166            **generator.Generator.TYPE_MAPPING,  # type: ignore
+167            exp.DataType.Type.VARCHAR: "TEXT",
+168            exp.DataType.Type.NVARCHAR: "TEXT",
+169        }
+
+ + + + +
+
+ + DuckDB() + + +
+ + + + +
+ +
+
+ +
+ + class + DuckDB.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
78    class Tokenizer(tokens.Tokenizer):
+79        KEYWORDS = {
+80            **tokens.Tokenizer.KEYWORDS,
+81            ":=": TokenType.EQ,
+82            "CHARACTER VARYING": TokenType.VARCHAR,
+83        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + DuckDB.Parser(sqlglot.parser.Parser): + + + +
+ +
 85    class Parser(parser.Parser):
+ 86        FUNCTIONS = {
+ 87            **parser.Parser.FUNCTIONS,  # type: ignore
+ 88            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+ 89            "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
+ 90            "ARRAY_SORT": exp.SortArray.from_arg_list,
+ 91            "ARRAY_REVERSE_SORT": _sort_array_reverse,
+ 92            "EPOCH": exp.TimeToUnix.from_arg_list,
+ 93            "EPOCH_MS": lambda args: exp.UnixToTime(
+ 94                this=exp.Div(
+ 95                    this=seq_get(args, 0),
+ 96                    expression=exp.Literal.number(1000),
+ 97                )
+ 98            ),
+ 99            "LIST_SORT": exp.SortArray.from_arg_list,
+100            "LIST_REVERSE_SORT": _sort_array_reverse,
+101            "LIST_VALUE": exp.Array.from_arg_list,
+102            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
+103            "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
+104            "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
+105            "STR_SPLIT": exp.Split.from_arg_list,
+106            "STRING_SPLIT": exp.Split.from_arg_list,
+107            "STRING_TO_ARRAY": exp.Split.from_arg_list,
+108            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+109            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
+110            "STRUCT_PACK": exp.Struct.from_arg_list,
+111            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
+112            "UNNEST": exp.Explode.from_arg_list,
+113        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + DuckDB.Generator(sqlglot.generator.Generator): + + + +
+ +
115    class Generator(generator.Generator):
+116        STRUCT_DELIMITER = ("(", ")")
+117
+118        TRANSFORMS = {
+119            **generator.Generator.TRANSFORMS,  # type: ignore
+120            exp.ApproxDistinct: approx_count_distinct_sql,
+121            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+122            if isinstance(seq_get(e.expressions, 0), exp.Select)
+123            else rename_func("LIST_VALUE")(self, e),
+124            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+125            exp.ArraySort: _array_sort_sql,
+126            exp.ArraySum: rename_func("LIST_SUM"),
+127            exp.DataType: _datatype_sql,
+128            exp.DateAdd: _date_add,
+129            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.format_args(e.args.get("unit") or "'day'", e.expression, e.this)})""",
+130            exp.DateStrToDate: datestrtodate_sql,
+131            exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
+132            exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
+133            exp.Explode: rename_func("UNNEST"),
+134            exp.JSONExtract: arrow_json_extract_sql,
+135            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+136            exp.JSONBExtract: arrow_json_extract_sql,
+137            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+138            exp.LogicalOr: rename_func("BOOL_OR"),
+139            exp.Pivot: no_pivot_sql,
+140            exp.Properties: no_properties_sql,
+141            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
+142            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
+143            exp.SafeDivide: no_safe_divide_sql,
+144            exp.Split: rename_func("STR_SPLIT"),
+145            exp.SortArray: _sort_array_sql,
+146            exp.StrPosition: str_position_sql,
+147            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+148            exp.StrToTime: _str_to_time_sql,
+149            exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
+150            exp.Struct: _struct_sql,
+151            exp.TableSample: no_tablesample_sql,
+152            exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
+153            exp.TimeStrToTime: timestrtotime_sql,
+154            exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
+155            exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+156            exp.TimeToUnix: rename_func("EPOCH"),
+157            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
+158            exp.TsOrDsAdd: _ts_or_ds_add,
+159            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+160            exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
+161            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
+162            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
+163        }
+164
+165        TYPE_MAPPING = {
+166            **generator.Generator.TYPE_MAPPING,  # type: ignore
+167            exp.DataType.Type.VARCHAR: "TEXT",
+168            exp.DataType.Type.NVARCHAR: "TEXT",
+169        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/hive.html b/docs/sqlglot/dialects/hive.html new file mode 100644 index 0000000..19511f2 --- /dev/null +++ b/docs/sqlglot/dialects/hive.html @@ -0,0 +1,1461 @@ + + + + + + + sqlglot.dialects.hive API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.hive

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens, transforms
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    approx_count_distinct_sql,
+  7    create_with_partitions_sql,
+  8    format_time_lambda,
+  9    if_sql,
+ 10    locate_to_strposition,
+ 11    no_ilike_sql,
+ 12    no_recursive_cte_sql,
+ 13    no_safe_divide_sql,
+ 14    no_trycast_sql,
+ 15    rename_func,
+ 16    strposition_to_locate_sql,
+ 17    struct_extract_sql,
+ 18    timestrtotime_sql,
+ 19    var_map_sql,
+ 20)
+ 21from sqlglot.helper import seq_get
+ 22from sqlglot.parser import parse_var_map
+ 23from sqlglot.tokens import TokenType
+ 24
+ 25# (FuncType, Multiplier)
+ 26DATE_DELTA_INTERVAL = {
+ 27    "YEAR": ("ADD_MONTHS", 12),
+ 28    "MONTH": ("ADD_MONTHS", 1),
+ 29    "QUARTER": ("ADD_MONTHS", 3),
+ 30    "WEEK": ("DATE_ADD", 7),
+ 31    "DAY": ("DATE_ADD", 1),
+ 32}
+ 33
+ 34DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")
+ 35
+ 36
+ 37def _add_date_sql(self, expression):
+ 38    unit = expression.text("unit").upper()
+ 39    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))
+ 40    modified_increment = (
+ 41        int(expression.text("expression")) * multiplier
+ 42        if expression.expression.is_number
+ 43        else expression.expression
+ 44    )
+ 45    modified_increment = exp.Literal.number(modified_increment)
+ 46    return f"{func}({self.format_args(expression.this, modified_increment.this)})"
+ 47
+ 48
+ 49def _date_diff_sql(self, expression):
+ 50    unit = expression.text("unit").upper()
+ 51    sql_func = "MONTHS_BETWEEN" if unit in DIFF_MONTH_SWITCH else "DATEDIFF"
+ 52    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
+ 53    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
+ 54    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"
+ 55    return f"{diff_sql}{multiplier_sql}"
+ 56
+ 57
+ 58def _array_sort(self, expression):
+ 59    if expression.expression:
+ 60        self.unsupported("Hive SORT_ARRAY does not support a comparator")
+ 61    return f"SORT_ARRAY({self.sql(expression, 'this')})"
+ 62
+ 63
+ 64def _property_sql(self, expression):
+ 65    return f"'{expression.name}'={self.sql(expression, 'value')}"
+ 66
+ 67
+ 68def _str_to_unix(self, expression):
+ 69    return f"UNIX_TIMESTAMP({self.format_args(expression.this, _time_format(self, expression))})"
+ 70
+ 71
+ 72def _str_to_date(self, expression):
+ 73    this = self.sql(expression, "this")
+ 74    time_format = self.format_time(expression)
+ 75    if time_format not in (Hive.time_format, Hive.date_format):
+ 76        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
+ 77    return f"CAST({this} AS DATE)"
+ 78
+ 79
+ 80def _str_to_time(self, expression):
+ 81    this = self.sql(expression, "this")
+ 82    time_format = self.format_time(expression)
+ 83    if time_format not in (Hive.time_format, Hive.date_format):
+ 84        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
+ 85    return f"CAST({this} AS TIMESTAMP)"
+ 86
+ 87
+ 88def _time_format(self, expression):
+ 89    time_format = self.format_time(expression)
+ 90    if time_format == Hive.time_format:
+ 91        return None
+ 92    return time_format
+ 93
+ 94
+ 95def _time_to_str(self, expression):
+ 96    this = self.sql(expression, "this")
+ 97    time_format = self.format_time(expression)
+ 98    return f"DATE_FORMAT({this}, {time_format})"
+ 99
+100
+101def _to_date_sql(self, expression):
+102    this = self.sql(expression, "this")
+103    time_format = self.format_time(expression)
+104    if time_format and time_format not in (Hive.time_format, Hive.date_format):
+105        return f"TO_DATE({this}, {time_format})"
+106    return f"TO_DATE({this})"
+107
+108
+109def _unnest_to_explode_sql(self, expression):
+110    unnest = expression.this
+111    if isinstance(unnest, exp.Unnest):
+112        alias = unnest.args.get("alias")
+113        udtf = exp.Posexplode if unnest.args.get("ordinality") else exp.Explode
+114        return "".join(
+115            self.sql(
+116                exp.Lateral(
+117                    this=udtf(this=expression),
+118                    view=True,
+119                    alias=exp.TableAlias(this=alias.this, columns=[column]),
+120                )
+121            )
+122            for expression, column in zip(unnest.expressions, alias.columns if alias else [])
+123        )
+124    return self.join_sql(expression)
+125
+126
+127def _index_sql(self, expression):
+128    this = self.sql(expression, "this")
+129    table = self.sql(expression, "table")
+130    columns = self.sql(expression, "columns")
+131    return f"{this} ON TABLE {table} {columns}"
+132
+133
+134class Hive(Dialect):
+135    alias_post_tablesample = True
+136
+137    time_mapping = {
+138        "y": "%Y",
+139        "Y": "%Y",
+140        "YYYY": "%Y",
+141        "yyyy": "%Y",
+142        "YY": "%y",
+143        "yy": "%y",
+144        "MMMM": "%B",
+145        "MMM": "%b",
+146        "MM": "%m",
+147        "M": "%-m",
+148        "dd": "%d",
+149        "d": "%-d",
+150        "HH": "%H",
+151        "H": "%-H",
+152        "hh": "%I",
+153        "h": "%-I",
+154        "mm": "%M",
+155        "m": "%-M",
+156        "ss": "%S",
+157        "s": "%-S",
+158        "SSSSSS": "%f",
+159        "a": "%p",
+160        "DD": "%j",
+161        "D": "%-j",
+162        "E": "%a",
+163        "EE": "%a",
+164        "EEE": "%a",
+165        "EEEE": "%A",
+166    }
+167
+168    date_format = "'yyyy-MM-dd'"
+169    dateint_format = "'yyyyMMdd'"
+170    time_format = "'yyyy-MM-dd HH:mm:ss'"
+171
+172    class Tokenizer(tokens.Tokenizer):
+173        QUOTES = ["'", '"']
+174        IDENTIFIERS = ["`"]
+175        STRING_ESCAPES = ["\\"]
+176        ENCODE = "utf-8"
+177
+178        KEYWORDS = {
+179            **tokens.Tokenizer.KEYWORDS,
+180            "ADD ARCHIVE": TokenType.COMMAND,
+181            "ADD ARCHIVES": TokenType.COMMAND,
+182            "ADD FILE": TokenType.COMMAND,
+183            "ADD FILES": TokenType.COMMAND,
+184            "ADD JAR": TokenType.COMMAND,
+185            "ADD JARS": TokenType.COMMAND,
+186            "MSCK REPAIR": TokenType.COMMAND,
+187            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
+188        }
+189
+190        NUMERIC_LITERALS = {
+191            "L": "BIGINT",
+192            "S": "SMALLINT",
+193            "Y": "TINYINT",
+194            "D": "DOUBLE",
+195            "F": "FLOAT",
+196            "BD": "DECIMAL",
+197        }
+198
+199        IDENTIFIER_CAN_START_WITH_DIGIT = True
+200
+201    class Parser(parser.Parser):
+202        STRICT_CAST = False
+203
+204        FUNCTIONS = {
+205            **parser.Parser.FUNCTIONS,  # type: ignore
+206            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+207            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
+208            "DATE_ADD": lambda args: exp.TsOrDsAdd(
+209                this=seq_get(args, 0),
+210                expression=seq_get(args, 1),
+211                unit=exp.Literal.string("DAY"),
+212            ),
+213            "DATEDIFF": lambda args: exp.DateDiff(
+214                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+215                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
+216            ),
+217            "DATE_SUB": lambda args: exp.TsOrDsAdd(
+218                this=seq_get(args, 0),
+219                expression=exp.Mul(
+220                    this=seq_get(args, 1),
+221                    expression=exp.Literal.number(-1),
+222                ),
+223                unit=exp.Literal.string("DAY"),
+224            ),
+225            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
+226                [
+227                    exp.TimeStrToTime(this=seq_get(args, 0)),
+228                    seq_get(args, 1),
+229                ]
+230            ),
+231            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
+232            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
+233            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
+234            "LOCATE": locate_to_strposition,
+235            "LOG": (
+236                lambda args: exp.Log.from_arg_list(args)
+237                if len(args) > 1
+238                else exp.Ln.from_arg_list(args)
+239            ),
+240            "MAP": parse_var_map,
+241            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
+242            "PERCENTILE": exp.Quantile.from_arg_list,
+243            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
+244            "COLLECT_SET": exp.SetAgg.from_arg_list,
+245            "SIZE": exp.ArraySize.from_arg_list,
+246            "SPLIT": exp.RegexpSplit.from_arg_list,
+247            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
+248            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
+249            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
+250        }
+251
+252        PROPERTY_PARSERS = {
+253            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+254            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
+255                expressions=self._parse_wrapped_csv(self._parse_property)
+256            ),
+257        }
+258
+259    class Generator(generator.Generator):
+260        TYPE_MAPPING = {
+261            **generator.Generator.TYPE_MAPPING,  # type: ignore
+262            exp.DataType.Type.TEXT: "STRING",
+263            exp.DataType.Type.DATETIME: "TIMESTAMP",
+264            exp.DataType.Type.VARBINARY: "BINARY",
+265        }
+266
+267        TRANSFORMS = {
+268            **generator.Generator.TRANSFORMS,  # type: ignore
+269            **transforms.UNALIAS_GROUP,  # type: ignore
+270            exp.Property: _property_sql,
+271            exp.ApproxDistinct: approx_count_distinct_sql,
+272            exp.ArrayAgg: rename_func("COLLECT_LIST"),
+273            exp.ArrayConcat: rename_func("CONCAT"),
+274            exp.ArraySize: rename_func("SIZE"),
+275            exp.ArraySort: _array_sort,
+276            exp.With: no_recursive_cte_sql,
+277            exp.DateAdd: _add_date_sql,
+278            exp.DateDiff: _date_diff_sql,
+279            exp.DateStrToDate: rename_func("TO_DATE"),
+280            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
+281            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
+282            exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}",
+283            exp.If: if_sql,
+284            exp.Index: _index_sql,
+285            exp.ILike: no_ilike_sql,
+286            exp.Join: _unnest_to_explode_sql,
+287            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
+288            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
+289            exp.Map: var_map_sql,
+290            exp.VarMap: var_map_sql,
+291            exp.Create: create_with_partitions_sql,
+292            exp.Quantile: rename_func("PERCENTILE"),
+293            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
+294            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
+295            exp.RegexpSplit: rename_func("SPLIT"),
+296            exp.SafeDivide: no_safe_divide_sql,
+297            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
+298            exp.SetAgg: rename_func("COLLECT_SET"),
+299            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
+300            exp.StrPosition: strposition_to_locate_sql,
+301            exp.StrToDate: _str_to_date,
+302            exp.StrToTime: _str_to_time,
+303            exp.StrToUnix: _str_to_unix,
+304            exp.StructExtract: struct_extract_sql,
+305            exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}",
+306            exp.TimeStrToDate: rename_func("TO_DATE"),
+307            exp.TimeStrToTime: timestrtotime_sql,
+308            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+309            exp.TimeToStr: _time_to_str,
+310            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+311            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
+312            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+313            exp.TsOrDsToDate: _to_date_sql,
+314            exp.TryCast: no_trycast_sql,
+315            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.format_args(e.this, _time_format(self, e))})",
+316            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+317            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
+318            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
+319            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
+320            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
+321            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
+322            exp.LastDateOfMonth: rename_func("LAST_DAY"),
+323        }
+324
+325        PROPERTIES_LOCATION = {
+326            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+327            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+328            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+329            exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+330        }
+331
+332        def with_properties(self, properties):
+333            return self.properties(
+334                properties,
+335                prefix=self.seg("TBLPROPERTIES"),
+336            )
+337
+338        def datatype_sql(self, expression):
+339            if (
+340                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
+341                and not expression.expressions
+342            ):
+343                expression = exp.DataType.build("text")
+344            elif expression.this in exp.DataType.TEMPORAL_TYPES:
+345                expression = exp.DataType.build(expression.this)
+346            return super().datatype_sql(expression)
+
+ + +
+
+ +
+ + class + Hive(sqlglot.dialects.dialect.Dialect): + + + +
+ +
135class Hive(Dialect):
+136    alias_post_tablesample = True
+137
+138    time_mapping = {
+139        "y": "%Y",
+140        "Y": "%Y",
+141        "YYYY": "%Y",
+142        "yyyy": "%Y",
+143        "YY": "%y",
+144        "yy": "%y",
+145        "MMMM": "%B",
+146        "MMM": "%b",
+147        "MM": "%m",
+148        "M": "%-m",
+149        "dd": "%d",
+150        "d": "%-d",
+151        "HH": "%H",
+152        "H": "%-H",
+153        "hh": "%I",
+154        "h": "%-I",
+155        "mm": "%M",
+156        "m": "%-M",
+157        "ss": "%S",
+158        "s": "%-S",
+159        "SSSSSS": "%f",
+160        "a": "%p",
+161        "DD": "%j",
+162        "D": "%-j",
+163        "E": "%a",
+164        "EE": "%a",
+165        "EEE": "%a",
+166        "EEEE": "%A",
+167    }
+168
+169    date_format = "'yyyy-MM-dd'"
+170    dateint_format = "'yyyyMMdd'"
+171    time_format = "'yyyy-MM-dd HH:mm:ss'"
+172
+173    class Tokenizer(tokens.Tokenizer):
+174        QUOTES = ["'", '"']
+175        IDENTIFIERS = ["`"]
+176        STRING_ESCAPES = ["\\"]
+177        ENCODE = "utf-8"
+178
+179        KEYWORDS = {
+180            **tokens.Tokenizer.KEYWORDS,
+181            "ADD ARCHIVE": TokenType.COMMAND,
+182            "ADD ARCHIVES": TokenType.COMMAND,
+183            "ADD FILE": TokenType.COMMAND,
+184            "ADD FILES": TokenType.COMMAND,
+185            "ADD JAR": TokenType.COMMAND,
+186            "ADD JARS": TokenType.COMMAND,
+187            "MSCK REPAIR": TokenType.COMMAND,
+188            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
+189        }
+190
+191        NUMERIC_LITERALS = {
+192            "L": "BIGINT",
+193            "S": "SMALLINT",
+194            "Y": "TINYINT",
+195            "D": "DOUBLE",
+196            "F": "FLOAT",
+197            "BD": "DECIMAL",
+198        }
+199
+200        IDENTIFIER_CAN_START_WITH_DIGIT = True
+201
+202    class Parser(parser.Parser):
+203        STRICT_CAST = False
+204
+205        FUNCTIONS = {
+206            **parser.Parser.FUNCTIONS,  # type: ignore
+207            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+208            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
+209            "DATE_ADD": lambda args: exp.TsOrDsAdd(
+210                this=seq_get(args, 0),
+211                expression=seq_get(args, 1),
+212                unit=exp.Literal.string("DAY"),
+213            ),
+214            "DATEDIFF": lambda args: exp.DateDiff(
+215                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+216                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
+217            ),
+218            "DATE_SUB": lambda args: exp.TsOrDsAdd(
+219                this=seq_get(args, 0),
+220                expression=exp.Mul(
+221                    this=seq_get(args, 1),
+222                    expression=exp.Literal.number(-1),
+223                ),
+224                unit=exp.Literal.string("DAY"),
+225            ),
+226            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
+227                [
+228                    exp.TimeStrToTime(this=seq_get(args, 0)),
+229                    seq_get(args, 1),
+230                ]
+231            ),
+232            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
+233            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
+234            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
+235            "LOCATE": locate_to_strposition,
+236            "LOG": (
+237                lambda args: exp.Log.from_arg_list(args)
+238                if len(args) > 1
+239                else exp.Ln.from_arg_list(args)
+240            ),
+241            "MAP": parse_var_map,
+242            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
+243            "PERCENTILE": exp.Quantile.from_arg_list,
+244            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
+245            "COLLECT_SET": exp.SetAgg.from_arg_list,
+246            "SIZE": exp.ArraySize.from_arg_list,
+247            "SPLIT": exp.RegexpSplit.from_arg_list,
+248            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
+249            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
+250            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
+251        }
+252
+253        PROPERTY_PARSERS = {
+254            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+255            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
+256                expressions=self._parse_wrapped_csv(self._parse_property)
+257            ),
+258        }
+259
+260    class Generator(generator.Generator):
+261        TYPE_MAPPING = {
+262            **generator.Generator.TYPE_MAPPING,  # type: ignore
+263            exp.DataType.Type.TEXT: "STRING",
+264            exp.DataType.Type.DATETIME: "TIMESTAMP",
+265            exp.DataType.Type.VARBINARY: "BINARY",
+266        }
+267
+268        TRANSFORMS = {
+269            **generator.Generator.TRANSFORMS,  # type: ignore
+270            **transforms.UNALIAS_GROUP,  # type: ignore
+271            exp.Property: _property_sql,
+272            exp.ApproxDistinct: approx_count_distinct_sql,
+273            exp.ArrayAgg: rename_func("COLLECT_LIST"),
+274            exp.ArrayConcat: rename_func("CONCAT"),
+275            exp.ArraySize: rename_func("SIZE"),
+276            exp.ArraySort: _array_sort,
+277            exp.With: no_recursive_cte_sql,
+278            exp.DateAdd: _add_date_sql,
+279            exp.DateDiff: _date_diff_sql,
+280            exp.DateStrToDate: rename_func("TO_DATE"),
+281            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
+282            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
+283            exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}",
+284            exp.If: if_sql,
+285            exp.Index: _index_sql,
+286            exp.ILike: no_ilike_sql,
+287            exp.Join: _unnest_to_explode_sql,
+288            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
+289            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
+290            exp.Map: var_map_sql,
+291            exp.VarMap: var_map_sql,
+292            exp.Create: create_with_partitions_sql,
+293            exp.Quantile: rename_func("PERCENTILE"),
+294            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
+295            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
+296            exp.RegexpSplit: rename_func("SPLIT"),
+297            exp.SafeDivide: no_safe_divide_sql,
+298            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
+299            exp.SetAgg: rename_func("COLLECT_SET"),
+300            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
+301            exp.StrPosition: strposition_to_locate_sql,
+302            exp.StrToDate: _str_to_date,
+303            exp.StrToTime: _str_to_time,
+304            exp.StrToUnix: _str_to_unix,
+305            exp.StructExtract: struct_extract_sql,
+306            exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}",
+307            exp.TimeStrToDate: rename_func("TO_DATE"),
+308            exp.TimeStrToTime: timestrtotime_sql,
+309            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+310            exp.TimeToStr: _time_to_str,
+311            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+312            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
+313            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+314            exp.TsOrDsToDate: _to_date_sql,
+315            exp.TryCast: no_trycast_sql,
+316            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.format_args(e.this, _time_format(self, e))})",
+317            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+318            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
+319            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
+320            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
+321            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
+322            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
+323            exp.LastDateOfMonth: rename_func("LAST_DAY"),
+324        }
+325
+326        PROPERTIES_LOCATION = {
+327            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+328            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+329            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+330            exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+331        }
+332
+333        def with_properties(self, properties):
+334            return self.properties(
+335                properties,
+336                prefix=self.seg("TBLPROPERTIES"),
+337            )
+338
+339        def datatype_sql(self, expression):
+340            if (
+341                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
+342                and not expression.expressions
+343            ):
+344                expression = exp.DataType.build("text")
+345            elif expression.this in exp.DataType.TEMPORAL_TYPES:
+346                expression = exp.DataType.build(expression.this)
+347            return super().datatype_sql(expression)
+
+ + + + +
+
+ + Hive() + + +
+ + + + +
+ +
+
+ +
+ + class + Hive.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
173    class Tokenizer(tokens.Tokenizer):
+174        QUOTES = ["'", '"']
+175        IDENTIFIERS = ["`"]
+176        STRING_ESCAPES = ["\\"]
+177        ENCODE = "utf-8"
+178
+179        KEYWORDS = {
+180            **tokens.Tokenizer.KEYWORDS,
+181            "ADD ARCHIVE": TokenType.COMMAND,
+182            "ADD ARCHIVES": TokenType.COMMAND,
+183            "ADD FILE": TokenType.COMMAND,
+184            "ADD FILES": TokenType.COMMAND,
+185            "ADD JAR": TokenType.COMMAND,
+186            "ADD JARS": TokenType.COMMAND,
+187            "MSCK REPAIR": TokenType.COMMAND,
+188            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
+189        }
+190
+191        NUMERIC_LITERALS = {
+192            "L": "BIGINT",
+193            "S": "SMALLINT",
+194            "Y": "TINYINT",
+195            "D": "DOUBLE",
+196            "F": "FLOAT",
+197            "BD": "DECIMAL",
+198        }
+199
+200        IDENTIFIER_CAN_START_WITH_DIGIT = True
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Hive.Parser(sqlglot.parser.Parser): + + + +
+ +
202    class Parser(parser.Parser):
+203        STRICT_CAST = False
+204
+205        FUNCTIONS = {
+206            **parser.Parser.FUNCTIONS,  # type: ignore
+207            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
+208            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
+209            "DATE_ADD": lambda args: exp.TsOrDsAdd(
+210                this=seq_get(args, 0),
+211                expression=seq_get(args, 1),
+212                unit=exp.Literal.string("DAY"),
+213            ),
+214            "DATEDIFF": lambda args: exp.DateDiff(
+215                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+216                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
+217            ),
+218            "DATE_SUB": lambda args: exp.TsOrDsAdd(
+219                this=seq_get(args, 0),
+220                expression=exp.Mul(
+221                    this=seq_get(args, 1),
+222                    expression=exp.Literal.number(-1),
+223                ),
+224                unit=exp.Literal.string("DAY"),
+225            ),
+226            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
+227                [
+228                    exp.TimeStrToTime(this=seq_get(args, 0)),
+229                    seq_get(args, 1),
+230                ]
+231            ),
+232            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
+233            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
+234            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
+235            "LOCATE": locate_to_strposition,
+236            "LOG": (
+237                lambda args: exp.Log.from_arg_list(args)
+238                if len(args) > 1
+239                else exp.Ln.from_arg_list(args)
+240            ),
+241            "MAP": parse_var_map,
+242            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
+243            "PERCENTILE": exp.Quantile.from_arg_list,
+244            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
+245            "COLLECT_SET": exp.SetAgg.from_arg_list,
+246            "SIZE": exp.ArraySize.from_arg_list,
+247            "SPLIT": exp.RegexpSplit.from_arg_list,
+248            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
+249            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
+250            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
+251        }
+252
+253        PROPERTY_PARSERS = {
+254            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+255            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
+256                expressions=self._parse_wrapped_csv(self._parse_property)
+257            ),
+258        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Hive.Generator(sqlglot.generator.Generator): + + + +
+ +
260    class Generator(generator.Generator):
+261        TYPE_MAPPING = {
+262            **generator.Generator.TYPE_MAPPING,  # type: ignore
+263            exp.DataType.Type.TEXT: "STRING",
+264            exp.DataType.Type.DATETIME: "TIMESTAMP",
+265            exp.DataType.Type.VARBINARY: "BINARY",
+266        }
+267
+268        TRANSFORMS = {
+269            **generator.Generator.TRANSFORMS,  # type: ignore
+270            **transforms.UNALIAS_GROUP,  # type: ignore
+271            exp.Property: _property_sql,
+272            exp.ApproxDistinct: approx_count_distinct_sql,
+273            exp.ArrayAgg: rename_func("COLLECT_LIST"),
+274            exp.ArrayConcat: rename_func("CONCAT"),
+275            exp.ArraySize: rename_func("SIZE"),
+276            exp.ArraySort: _array_sort,
+277            exp.With: no_recursive_cte_sql,
+278            exp.DateAdd: _add_date_sql,
+279            exp.DateDiff: _date_diff_sql,
+280            exp.DateStrToDate: rename_func("TO_DATE"),
+281            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
+282            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
+283            exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}",
+284            exp.If: if_sql,
+285            exp.Index: _index_sql,
+286            exp.ILike: no_ilike_sql,
+287            exp.Join: _unnest_to_explode_sql,
+288            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
+289            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
+290            exp.Map: var_map_sql,
+291            exp.VarMap: var_map_sql,
+292            exp.Create: create_with_partitions_sql,
+293            exp.Quantile: rename_func("PERCENTILE"),
+294            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
+295            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
+296            exp.RegexpSplit: rename_func("SPLIT"),
+297            exp.SafeDivide: no_safe_divide_sql,
+298            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
+299            exp.SetAgg: rename_func("COLLECT_SET"),
+300            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
+301            exp.StrPosition: strposition_to_locate_sql,
+302            exp.StrToDate: _str_to_date,
+303            exp.StrToTime: _str_to_time,
+304            exp.StrToUnix: _str_to_unix,
+305            exp.StructExtract: struct_extract_sql,
+306            exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}",
+307            exp.TimeStrToDate: rename_func("TO_DATE"),
+308            exp.TimeStrToTime: timestrtotime_sql,
+309            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
+310            exp.TimeToStr: _time_to_str,
+311            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+312            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
+313            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+314            exp.TsOrDsToDate: _to_date_sql,
+315            exp.TryCast: no_trycast_sql,
+316            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.format_args(e.this, _time_format(self, e))})",
+317            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+318            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
+319            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
+320            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
+321            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
+322            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
+323            exp.LastDateOfMonth: rename_func("LAST_DAY"),
+324        }
+325
+326        PROPERTIES_LOCATION = {
+327            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+328            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+329            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+330            exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+331        }
+332
+333        def with_properties(self, properties):
+334            return self.properties(
+335                properties,
+336                prefix=self.seg("TBLPROPERTIES"),
+337            )
+338
+339        def datatype_sql(self, expression):
+340            if (
+341                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
+342                and not expression.expressions
+343            ):
+344                expression = exp.DataType.build("text")
+345            elif expression.this in exp.DataType.TEMPORAL_TYPES:
+346                expression = exp.DataType.build(expression.this)
+347            return super().datatype_sql(expression)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + with_properties(self, properties): + + + +
+ +
333        def with_properties(self, properties):
+334            return self.properties(
+335                properties,
+336                prefix=self.seg("TBLPROPERTIES"),
+337            )
+
+ + + + +
+
+ +
+ + def + datatype_sql(self, expression): + + + +
+ +
339        def datatype_sql(self, expression):
+340            if (
+341                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
+342                and not expression.expressions
+343            ):
+344                expression = exp.DataType.build("text")
+345            elif expression.this in exp.DataType.TEMPORAL_TYPES:
+346                expression = exp.DataType.build(expression.this)
+347            return super().datatype_sql(expression)
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/mysql.html b/docs/sqlglot/dialects/mysql.html new file mode 100644 index 0000000..06ba4de --- /dev/null +++ b/docs/sqlglot/dialects/mysql.html @@ -0,0 +1,2149 @@ + + + + + + + sqlglot.dialects.mysql API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.mysql

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    locate_to_strposition,
+  7    no_ilike_sql,
+  8    no_paren_current_date_sql,
+  9    no_tablesample_sql,
+ 10    no_trycast_sql,
+ 11    strposition_to_locate_sql,
+ 12)
+ 13from sqlglot.helper import seq_get
+ 14from sqlglot.tokens import TokenType
+ 15
+ 16
+ 17def _show_parser(*args, **kwargs):
+ 18    def _parse(self):
+ 19        return self._parse_show_mysql(*args, **kwargs)
+ 20
+ 21    return _parse
+ 22
+ 23
+ 24def _date_trunc_sql(self, expression):
+ 25    unit = expression.name.lower()
+ 26
+ 27    expr = self.sql(expression.expression)
+ 28
+ 29    if unit == "day":
+ 30        return f"DATE({expr})"
+ 31
+ 32    if unit == "week":
+ 33        concat = f"CONCAT(YEAR({expr}), ' ', WEEK({expr}, 1), ' 1')"
+ 34        date_format = "%Y %u %w"
+ 35    elif unit == "month":
+ 36        concat = f"CONCAT(YEAR({expr}), ' ', MONTH({expr}), ' 1')"
+ 37        date_format = "%Y %c %e"
+ 38    elif unit == "quarter":
+ 39        concat = f"CONCAT(YEAR({expr}), ' ', QUARTER({expr}) * 3 - 2, ' 1')"
+ 40        date_format = "%Y %c %e"
+ 41    elif unit == "year":
+ 42        concat = f"CONCAT(YEAR({expr}), ' 1 1')"
+ 43        date_format = "%Y %c %e"
+ 44    else:
+ 45        self.unsupported("Unexpected interval unit: {unit}")
+ 46        return f"DATE({expr})"
+ 47
+ 48    return f"STR_TO_DATE({concat}, '{date_format}')"
+ 49
+ 50
+ 51def _str_to_date(args):
+ 52    date_format = MySQL.format_time(seq_get(args, 1))
+ 53    return exp.StrToDate(this=seq_get(args, 0), format=date_format)
+ 54
+ 55
+ 56def _str_to_date_sql(self, expression):
+ 57    date_format = self.format_time(expression)
+ 58    return f"STR_TO_DATE({self.sql(expression.this)}, {date_format})"
+ 59
+ 60
+ 61def _trim_sql(self, expression):
+ 62    target = self.sql(expression, "this")
+ 63    trim_type = self.sql(expression, "position")
+ 64    remove_chars = self.sql(expression, "expression")
+ 65
+ 66    # Use TRIM/LTRIM/RTRIM syntax if the expression isn't mysql-specific
+ 67    if not remove_chars:
+ 68        return self.trim_sql(expression)
+ 69
+ 70    trim_type = f"{trim_type} " if trim_type else ""
+ 71    remove_chars = f"{remove_chars} " if remove_chars else ""
+ 72    from_part = "FROM " if trim_type or remove_chars else ""
+ 73    return f"TRIM({trim_type}{remove_chars}{from_part}{target})"
+ 74
+ 75
+ 76def _date_add(expression_class):
+ 77    def func(args):
+ 78        interval = seq_get(args, 1)
+ 79        return expression_class(
+ 80            this=seq_get(args, 0),
+ 81            expression=interval.this,
+ 82            unit=exp.Literal.string(interval.text("unit").lower()),
+ 83        )
+ 84
+ 85    return func
+ 86
+ 87
+ 88def _date_add_sql(kind):
+ 89    def func(self, expression):
+ 90        this = self.sql(expression, "this")
+ 91        unit = expression.text("unit").upper() or "DAY"
+ 92        return (
+ 93            f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})"
+ 94        )
+ 95
+ 96    return func
+ 97
+ 98
+ 99class MySQL(Dialect):
+100    time_format = "'%Y-%m-%d %T'"
+101
+102    # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions
+103    time_mapping = {
+104        "%M": "%B",
+105        "%c": "%-m",
+106        "%e": "%-d",
+107        "%h": "%I",
+108        "%i": "%M",
+109        "%s": "%S",
+110        "%S": "%S",
+111        "%u": "%W",
+112        "%k": "%-H",
+113        "%l": "%-I",
+114        "%T": "%H:%M:%S",
+115    }
+116
+117    class Tokenizer(tokens.Tokenizer):
+118        QUOTES = ["'", '"']
+119        COMMENTS = ["--", "#", ("/*", "*/")]
+120        IDENTIFIERS = ["`"]
+121        STRING_ESCAPES = ["'", "\\"]
+122        BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")]
+123        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")]
+124
+125        KEYWORDS = {
+126            **tokens.Tokenizer.KEYWORDS,
+127            "MEDIUMTEXT": TokenType.MEDIUMTEXT,
+128            "LONGTEXT": TokenType.LONGTEXT,
+129            "MEDIUMBLOB": TokenType.MEDIUMBLOB,
+130            "LONGBLOB": TokenType.LONGBLOB,
+131            "START": TokenType.BEGIN,
+132            "SEPARATOR": TokenType.SEPARATOR,
+133            "_ARMSCII8": TokenType.INTRODUCER,
+134            "_ASCII": TokenType.INTRODUCER,
+135            "_BIG5": TokenType.INTRODUCER,
+136            "_BINARY": TokenType.INTRODUCER,
+137            "_CP1250": TokenType.INTRODUCER,
+138            "_CP1251": TokenType.INTRODUCER,
+139            "_CP1256": TokenType.INTRODUCER,
+140            "_CP1257": TokenType.INTRODUCER,
+141            "_CP850": TokenType.INTRODUCER,
+142            "_CP852": TokenType.INTRODUCER,
+143            "_CP866": TokenType.INTRODUCER,
+144            "_CP932": TokenType.INTRODUCER,
+145            "_DEC8": TokenType.INTRODUCER,
+146            "_EUCJPMS": TokenType.INTRODUCER,
+147            "_EUCKR": TokenType.INTRODUCER,
+148            "_GB18030": TokenType.INTRODUCER,
+149            "_GB2312": TokenType.INTRODUCER,
+150            "_GBK": TokenType.INTRODUCER,
+151            "_GEOSTD8": TokenType.INTRODUCER,
+152            "_GREEK": TokenType.INTRODUCER,
+153            "_HEBREW": TokenType.INTRODUCER,
+154            "_HP8": TokenType.INTRODUCER,
+155            "_KEYBCS2": TokenType.INTRODUCER,
+156            "_KOI8R": TokenType.INTRODUCER,
+157            "_KOI8U": TokenType.INTRODUCER,
+158            "_LATIN1": TokenType.INTRODUCER,
+159            "_LATIN2": TokenType.INTRODUCER,
+160            "_LATIN5": TokenType.INTRODUCER,
+161            "_LATIN7": TokenType.INTRODUCER,
+162            "_MACCE": TokenType.INTRODUCER,
+163            "_MACROMAN": TokenType.INTRODUCER,
+164            "_SJIS": TokenType.INTRODUCER,
+165            "_SWE7": TokenType.INTRODUCER,
+166            "_TIS620": TokenType.INTRODUCER,
+167            "_UCS2": TokenType.INTRODUCER,
+168            "_UJIS": TokenType.INTRODUCER,
+169            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
+170            "_UTF8": TokenType.INTRODUCER,
+171            "_UTF16": TokenType.INTRODUCER,
+172            "_UTF16LE": TokenType.INTRODUCER,
+173            "_UTF32": TokenType.INTRODUCER,
+174            "_UTF8MB3": TokenType.INTRODUCER,
+175            "_UTF8MB4": TokenType.INTRODUCER,
+176            "@@": TokenType.SESSION_PARAMETER,
+177        }
+178
+179        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SET, TokenType.SHOW}
+180
+181    class Parser(parser.Parser):
+182        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA}  # type: ignore
+183
+184        FUNCTIONS = {
+185            **parser.Parser.FUNCTIONS,  # type: ignore
+186            "DATE_ADD": _date_add(exp.DateAdd),
+187            "DATE_SUB": _date_add(exp.DateSub),
+188            "STR_TO_DATE": _str_to_date,
+189            "LOCATE": locate_to_strposition,
+190            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
+191            "LEFT": lambda args: exp.Substring(
+192                this=seq_get(args, 0), start=exp.Literal.number(1), length=seq_get(args, 1)
+193            ),
+194        }
+195
+196        FUNCTION_PARSERS = {
+197            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+198            "GROUP_CONCAT": lambda self: self.expression(
+199                exp.GroupConcat,
+200                this=self._parse_lambda(),
+201                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
+202            ),
+203        }
+204
+205        PROPERTY_PARSERS = {
+206            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+207            "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
+208        }
+209
+210        STATEMENT_PARSERS = {
+211            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+212            TokenType.SHOW: lambda self: self._parse_show(),
+213            TokenType.SET: lambda self: self._parse_set(),
+214        }
+215
+216        SHOW_PARSERS = {
+217            "BINARY LOGS": _show_parser("BINARY LOGS"),
+218            "MASTER LOGS": _show_parser("BINARY LOGS"),
+219            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
+220            "CHARACTER SET": _show_parser("CHARACTER SET"),
+221            "CHARSET": _show_parser("CHARACTER SET"),
+222            "COLLATION": _show_parser("COLLATION"),
+223            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
+224            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
+225            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
+226            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
+227            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
+228            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
+229            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
+230            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
+231            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
+232            "DATABASES": _show_parser("DATABASES"),
+233            "ENGINE": _show_parser("ENGINE", target=True),
+234            "STORAGE ENGINES": _show_parser("ENGINES"),
+235            "ENGINES": _show_parser("ENGINES"),
+236            "ERRORS": _show_parser("ERRORS"),
+237            "EVENTS": _show_parser("EVENTS"),
+238            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
+239            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
+240            "GRANTS": _show_parser("GRANTS", target="FOR"),
+241            "INDEX": _show_parser("INDEX", target="FROM"),
+242            "MASTER STATUS": _show_parser("MASTER STATUS"),
+243            "OPEN TABLES": _show_parser("OPEN TABLES"),
+244            "PLUGINS": _show_parser("PLUGINS"),
+245            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
+246            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
+247            "PRIVILEGES": _show_parser("PRIVILEGES"),
+248            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
+249            "PROCESSLIST": _show_parser("PROCESSLIST"),
+250            "PROFILE": _show_parser("PROFILE"),
+251            "PROFILES": _show_parser("PROFILES"),
+252            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
+253            "REPLICAS": _show_parser("REPLICAS"),
+254            "SLAVE HOSTS": _show_parser("REPLICAS"),
+255            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
+256            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
+257            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
+258            "SESSION STATUS": _show_parser("STATUS"),
+259            "STATUS": _show_parser("STATUS"),
+260            "TABLE STATUS": _show_parser("TABLE STATUS"),
+261            "FULL TABLES": _show_parser("TABLES", full=True),
+262            "TABLES": _show_parser("TABLES"),
+263            "TRIGGERS": _show_parser("TRIGGERS"),
+264            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
+265            "SESSION VARIABLES": _show_parser("VARIABLES"),
+266            "VARIABLES": _show_parser("VARIABLES"),
+267            "WARNINGS": _show_parser("WARNINGS"),
+268        }
+269
+270        SET_PARSERS = {
+271            "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
+272            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
+273            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
+274            "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
+275            "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
+276            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+277            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+278            "NAMES": lambda self: self._parse_set_item_names(),
+279            "TRANSACTION": lambda self: self._parse_set_transaction(),
+280        }
+281
+282        PROFILE_TYPES = {
+283            "ALL",
+284            "BLOCK IO",
+285            "CONTEXT SWITCHES",
+286            "CPU",
+287            "IPC",
+288            "MEMORY",
+289            "PAGE FAULTS",
+290            "SOURCE",
+291            "SWAPS",
+292        }
+293
+294        TRANSACTION_CHARACTERISTICS = {
+295            "ISOLATION LEVEL REPEATABLE READ",
+296            "ISOLATION LEVEL READ COMMITTED",
+297            "ISOLATION LEVEL READ UNCOMMITTED",
+298            "ISOLATION LEVEL SERIALIZABLE",
+299            "READ WRITE",
+300            "READ ONLY",
+301        }
+302
+303        def _parse_show_mysql(self, this, target=False, full=None, global_=None):
+304            if target:
+305                if isinstance(target, str):
+306                    self._match_text_seq(target)
+307                target_id = self._parse_id_var()
+308            else:
+309                target_id = None
+310
+311            log = self._parse_string() if self._match_text_seq("IN") else None
+312
+313            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
+314                position = self._parse_number() if self._match_text_seq("FROM") else None
+315                db = None
+316            else:
+317                position = None
+318                db = self._parse_id_var() if self._match_text_seq("FROM") else None
+319
+320            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
+321
+322            like = self._parse_string() if self._match_text_seq("LIKE") else None
+323            where = self._parse_where()
+324
+325            if this == "PROFILE":
+326                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
+327                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
+328                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
+329                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
+330            else:
+331                types, query = None, None
+332                offset, limit = self._parse_oldstyle_limit()
+333
+334            mutex = True if self._match_text_seq("MUTEX") else None
+335            mutex = False if self._match_text_seq("STATUS") else mutex
+336
+337            return self.expression(
+338                exp.Show,
+339                this=this,
+340                target=target_id,
+341                full=full,
+342                log=log,
+343                position=position,
+344                db=db,
+345                channel=channel,
+346                like=like,
+347                where=where,
+348                types=types,
+349                query=query,
+350                offset=offset,
+351                limit=limit,
+352                mutex=mutex,
+353                **{"global": global_},
+354            )
+355
+356        def _parse_var_from_options(self, options):
+357            for option in options:
+358                if self._match_text_seq(*option.split(" ")):
+359                    return exp.Var(this=option)
+360            return None
+361
+362        def _parse_oldstyle_limit(self):
+363            limit = None
+364            offset = None
+365            if self._match_text_seq("LIMIT"):
+366                parts = self._parse_csv(self._parse_number)
+367                if len(parts) == 1:
+368                    limit = parts[0]
+369                elif len(parts) == 2:
+370                    limit = parts[1]
+371                    offset = parts[0]
+372            return offset, limit
+373
+374        def _default_parse_set_item(self):
+375            return self._parse_set_item_assignment(kind=None)
+376
+377        def _parse_set_item_assignment(self, kind):
+378            if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"):
+379                return self._parse_set_transaction(global_=kind == "GLOBAL")
+380
+381            left = self._parse_primary() or self._parse_id_var()
+382            if not self._match(TokenType.EQ):
+383                self.raise_error("Expected =")
+384            right = self._parse_statement() or self._parse_id_var()
+385
+386            this = self.expression(
+387                exp.EQ,
+388                this=left,
+389                expression=right,
+390            )
+391
+392            return self.expression(
+393                exp.SetItem,
+394                this=this,
+395                kind=kind,
+396            )
+397
+398        def _parse_set_item_charset(self, kind):
+399            this = self._parse_string() or self._parse_id_var()
+400
+401            return self.expression(
+402                exp.SetItem,
+403                this=this,
+404                kind=kind,
+405            )
+406
+407        def _parse_set_item_names(self):
+408            charset = self._parse_string() or self._parse_id_var()
+409            if self._match_text_seq("COLLATE"):
+410                collate = self._parse_string() or self._parse_id_var()
+411            else:
+412                collate = None
+413            return self.expression(
+414                exp.SetItem,
+415                this=charset,
+416                collate=collate,
+417                kind="NAMES",
+418            )
+419
+420        def _parse_set_transaction(self, global_=False):
+421            self._match_text_seq("TRANSACTION")
+422            characteristics = self._parse_csv(
+423                lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
+424            )
+425            return self.expression(
+426                exp.SetItem,
+427                expressions=characteristics,
+428                kind="TRANSACTION",
+429                **{"global": global_},
+430            )
+431
+432    class Generator(generator.Generator):
+433        LOCKING_READS_SUPPORTED = True
+434        NULL_ORDERING_SUPPORTED = False
+435
+436        TRANSFORMS = {
+437            **generator.Generator.TRANSFORMS,  # type: ignore
+438            exp.CurrentDate: no_paren_current_date_sql,
+439            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+440            exp.ILike: no_ilike_sql,
+441            exp.TableSample: no_tablesample_sql,
+442            exp.TryCast: no_trycast_sql,
+443            exp.DateAdd: _date_add_sql("ADD"),
+444            exp.DateSub: _date_add_sql("SUB"),
+445            exp.DateTrunc: _date_trunc_sql,
+446            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
+447            exp.StrToDate: _str_to_date_sql,
+448            exp.StrToTime: _str_to_date_sql,
+449            exp.Trim: _trim_sql,
+450            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
+451            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
+452            exp.StrPosition: strposition_to_locate_sql,
+453        }
+454
+455        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
+456        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
+457        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
+458        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
+459        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
+460
+461        def show_sql(self, expression):
+462            this = f" {expression.name}"
+463            full = " FULL" if expression.args.get("full") else ""
+464            global_ = " GLOBAL" if expression.args.get("global") else ""
+465
+466            target = self.sql(expression, "target")
+467            target = f" {target}" if target else ""
+468            if expression.name in {"COLUMNS", "INDEX"}:
+469                target = f" FROM{target}"
+470            elif expression.name == "GRANTS":
+471                target = f" FOR{target}"
+472
+473            db = self._prefixed_sql("FROM", expression, "db")
+474
+475            like = self._prefixed_sql("LIKE", expression, "like")
+476            where = self.sql(expression, "where")
+477
+478            types = self.expressions(expression, key="types")
+479            types = f" {types}" if types else types
+480            query = self._prefixed_sql("FOR QUERY", expression, "query")
+481
+482            if expression.name == "PROFILE":
+483                offset = self._prefixed_sql("OFFSET", expression, "offset")
+484                limit = self._prefixed_sql("LIMIT", expression, "limit")
+485            else:
+486                offset = ""
+487                limit = self._oldstyle_limit_sql(expression)
+488
+489            log = self._prefixed_sql("IN", expression, "log")
+490            position = self._prefixed_sql("FROM", expression, "position")
+491
+492            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
+493
+494            if expression.name == "ENGINE":
+495                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
+496            else:
+497                mutex_or_status = ""
+498
+499            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
+500
+501        def _prefixed_sql(self, prefix, expression, arg):
+502            sql = self.sql(expression, arg)
+503            if not sql:
+504                return ""
+505            return f" {prefix} {sql}"
+506
+507        def _oldstyle_limit_sql(self, expression):
+508            limit = self.sql(expression, "limit")
+509            offset = self.sql(expression, "offset")
+510            if limit:
+511                limit_offset = f"{offset}, {limit}" if offset else limit
+512                return f" LIMIT {limit_offset}"
+513            return ""
+514
+515        def setitem_sql(self, expression):
+516            kind = self.sql(expression, "kind")
+517            kind = f"{kind} " if kind else ""
+518            this = self.sql(expression, "this")
+519            expressions = self.expressions(expression)
+520            collate = self.sql(expression, "collate")
+521            collate = f" COLLATE {collate}" if collate else ""
+522            global_ = "GLOBAL " if expression.args.get("global") else ""
+523            return f"{global_}{kind}{this}{expressions}{collate}"
+524
+525        def set_sql(self, expression):
+526            return f"SET {self.expressions(expression)}"
+
+ + +
+
+ +
+ + class + MySQL(sqlglot.dialects.dialect.Dialect): + + + +
+ +
100class MySQL(Dialect):
+101    time_format = "'%Y-%m-%d %T'"
+102
+103    # https://prestodb.io/docs/current/functions/datetime.html#mysql-date-functions
+104    time_mapping = {
+105        "%M": "%B",
+106        "%c": "%-m",
+107        "%e": "%-d",
+108        "%h": "%I",
+109        "%i": "%M",
+110        "%s": "%S",
+111        "%S": "%S",
+112        "%u": "%W",
+113        "%k": "%-H",
+114        "%l": "%-I",
+115        "%T": "%H:%M:%S",
+116    }
+117
+118    class Tokenizer(tokens.Tokenizer):
+119        QUOTES = ["'", '"']
+120        COMMENTS = ["--", "#", ("/*", "*/")]
+121        IDENTIFIERS = ["`"]
+122        STRING_ESCAPES = ["'", "\\"]
+123        BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")]
+124        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")]
+125
+126        KEYWORDS = {
+127            **tokens.Tokenizer.KEYWORDS,
+128            "MEDIUMTEXT": TokenType.MEDIUMTEXT,
+129            "LONGTEXT": TokenType.LONGTEXT,
+130            "MEDIUMBLOB": TokenType.MEDIUMBLOB,
+131            "LONGBLOB": TokenType.LONGBLOB,
+132            "START": TokenType.BEGIN,
+133            "SEPARATOR": TokenType.SEPARATOR,
+134            "_ARMSCII8": TokenType.INTRODUCER,
+135            "_ASCII": TokenType.INTRODUCER,
+136            "_BIG5": TokenType.INTRODUCER,
+137            "_BINARY": TokenType.INTRODUCER,
+138            "_CP1250": TokenType.INTRODUCER,
+139            "_CP1251": TokenType.INTRODUCER,
+140            "_CP1256": TokenType.INTRODUCER,
+141            "_CP1257": TokenType.INTRODUCER,
+142            "_CP850": TokenType.INTRODUCER,
+143            "_CP852": TokenType.INTRODUCER,
+144            "_CP866": TokenType.INTRODUCER,
+145            "_CP932": TokenType.INTRODUCER,
+146            "_DEC8": TokenType.INTRODUCER,
+147            "_EUCJPMS": TokenType.INTRODUCER,
+148            "_EUCKR": TokenType.INTRODUCER,
+149            "_GB18030": TokenType.INTRODUCER,
+150            "_GB2312": TokenType.INTRODUCER,
+151            "_GBK": TokenType.INTRODUCER,
+152            "_GEOSTD8": TokenType.INTRODUCER,
+153            "_GREEK": TokenType.INTRODUCER,
+154            "_HEBREW": TokenType.INTRODUCER,
+155            "_HP8": TokenType.INTRODUCER,
+156            "_KEYBCS2": TokenType.INTRODUCER,
+157            "_KOI8R": TokenType.INTRODUCER,
+158            "_KOI8U": TokenType.INTRODUCER,
+159            "_LATIN1": TokenType.INTRODUCER,
+160            "_LATIN2": TokenType.INTRODUCER,
+161            "_LATIN5": TokenType.INTRODUCER,
+162            "_LATIN7": TokenType.INTRODUCER,
+163            "_MACCE": TokenType.INTRODUCER,
+164            "_MACROMAN": TokenType.INTRODUCER,
+165            "_SJIS": TokenType.INTRODUCER,
+166            "_SWE7": TokenType.INTRODUCER,
+167            "_TIS620": TokenType.INTRODUCER,
+168            "_UCS2": TokenType.INTRODUCER,
+169            "_UJIS": TokenType.INTRODUCER,
+170            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
+171            "_UTF8": TokenType.INTRODUCER,
+172            "_UTF16": TokenType.INTRODUCER,
+173            "_UTF16LE": TokenType.INTRODUCER,
+174            "_UTF32": TokenType.INTRODUCER,
+175            "_UTF8MB3": TokenType.INTRODUCER,
+176            "_UTF8MB4": TokenType.INTRODUCER,
+177            "@@": TokenType.SESSION_PARAMETER,
+178        }
+179
+180        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SET, TokenType.SHOW}
+181
+182    class Parser(parser.Parser):
+183        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA}  # type: ignore
+184
+185        FUNCTIONS = {
+186            **parser.Parser.FUNCTIONS,  # type: ignore
+187            "DATE_ADD": _date_add(exp.DateAdd),
+188            "DATE_SUB": _date_add(exp.DateSub),
+189            "STR_TO_DATE": _str_to_date,
+190            "LOCATE": locate_to_strposition,
+191            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
+192            "LEFT": lambda args: exp.Substring(
+193                this=seq_get(args, 0), start=exp.Literal.number(1), length=seq_get(args, 1)
+194            ),
+195        }
+196
+197        FUNCTION_PARSERS = {
+198            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+199            "GROUP_CONCAT": lambda self: self.expression(
+200                exp.GroupConcat,
+201                this=self._parse_lambda(),
+202                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
+203            ),
+204        }
+205
+206        PROPERTY_PARSERS = {
+207            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+208            "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
+209        }
+210
+211        STATEMENT_PARSERS = {
+212            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+213            TokenType.SHOW: lambda self: self._parse_show(),
+214            TokenType.SET: lambda self: self._parse_set(),
+215        }
+216
+217        SHOW_PARSERS = {
+218            "BINARY LOGS": _show_parser("BINARY LOGS"),
+219            "MASTER LOGS": _show_parser("BINARY LOGS"),
+220            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
+221            "CHARACTER SET": _show_parser("CHARACTER SET"),
+222            "CHARSET": _show_parser("CHARACTER SET"),
+223            "COLLATION": _show_parser("COLLATION"),
+224            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
+225            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
+226            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
+227            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
+228            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
+229            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
+230            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
+231            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
+232            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
+233            "DATABASES": _show_parser("DATABASES"),
+234            "ENGINE": _show_parser("ENGINE", target=True),
+235            "STORAGE ENGINES": _show_parser("ENGINES"),
+236            "ENGINES": _show_parser("ENGINES"),
+237            "ERRORS": _show_parser("ERRORS"),
+238            "EVENTS": _show_parser("EVENTS"),
+239            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
+240            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
+241            "GRANTS": _show_parser("GRANTS", target="FOR"),
+242            "INDEX": _show_parser("INDEX", target="FROM"),
+243            "MASTER STATUS": _show_parser("MASTER STATUS"),
+244            "OPEN TABLES": _show_parser("OPEN TABLES"),
+245            "PLUGINS": _show_parser("PLUGINS"),
+246            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
+247            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
+248            "PRIVILEGES": _show_parser("PRIVILEGES"),
+249            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
+250            "PROCESSLIST": _show_parser("PROCESSLIST"),
+251            "PROFILE": _show_parser("PROFILE"),
+252            "PROFILES": _show_parser("PROFILES"),
+253            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
+254            "REPLICAS": _show_parser("REPLICAS"),
+255            "SLAVE HOSTS": _show_parser("REPLICAS"),
+256            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
+257            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
+258            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
+259            "SESSION STATUS": _show_parser("STATUS"),
+260            "STATUS": _show_parser("STATUS"),
+261            "TABLE STATUS": _show_parser("TABLE STATUS"),
+262            "FULL TABLES": _show_parser("TABLES", full=True),
+263            "TABLES": _show_parser("TABLES"),
+264            "TRIGGERS": _show_parser("TRIGGERS"),
+265            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
+266            "SESSION VARIABLES": _show_parser("VARIABLES"),
+267            "VARIABLES": _show_parser("VARIABLES"),
+268            "WARNINGS": _show_parser("WARNINGS"),
+269        }
+270
+271        SET_PARSERS = {
+272            "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
+273            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
+274            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
+275            "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
+276            "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
+277            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+278            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+279            "NAMES": lambda self: self._parse_set_item_names(),
+280            "TRANSACTION": lambda self: self._parse_set_transaction(),
+281        }
+282
+283        PROFILE_TYPES = {
+284            "ALL",
+285            "BLOCK IO",
+286            "CONTEXT SWITCHES",
+287            "CPU",
+288            "IPC",
+289            "MEMORY",
+290            "PAGE FAULTS",
+291            "SOURCE",
+292            "SWAPS",
+293        }
+294
+295        TRANSACTION_CHARACTERISTICS = {
+296            "ISOLATION LEVEL REPEATABLE READ",
+297            "ISOLATION LEVEL READ COMMITTED",
+298            "ISOLATION LEVEL READ UNCOMMITTED",
+299            "ISOLATION LEVEL SERIALIZABLE",
+300            "READ WRITE",
+301            "READ ONLY",
+302        }
+303
+304        def _parse_show_mysql(self, this, target=False, full=None, global_=None):
+305            if target:
+306                if isinstance(target, str):
+307                    self._match_text_seq(target)
+308                target_id = self._parse_id_var()
+309            else:
+310                target_id = None
+311
+312            log = self._parse_string() if self._match_text_seq("IN") else None
+313
+314            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
+315                position = self._parse_number() if self._match_text_seq("FROM") else None
+316                db = None
+317            else:
+318                position = None
+319                db = self._parse_id_var() if self._match_text_seq("FROM") else None
+320
+321            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
+322
+323            like = self._parse_string() if self._match_text_seq("LIKE") else None
+324            where = self._parse_where()
+325
+326            if this == "PROFILE":
+327                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
+328                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
+329                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
+330                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
+331            else:
+332                types, query = None, None
+333                offset, limit = self._parse_oldstyle_limit()
+334
+335            mutex = True if self._match_text_seq("MUTEX") else None
+336            mutex = False if self._match_text_seq("STATUS") else mutex
+337
+338            return self.expression(
+339                exp.Show,
+340                this=this,
+341                target=target_id,
+342                full=full,
+343                log=log,
+344                position=position,
+345                db=db,
+346                channel=channel,
+347                like=like,
+348                where=where,
+349                types=types,
+350                query=query,
+351                offset=offset,
+352                limit=limit,
+353                mutex=mutex,
+354                **{"global": global_},
+355            )
+356
+357        def _parse_var_from_options(self, options):
+358            for option in options:
+359                if self._match_text_seq(*option.split(" ")):
+360                    return exp.Var(this=option)
+361            return None
+362
+363        def _parse_oldstyle_limit(self):
+364            limit = None
+365            offset = None
+366            if self._match_text_seq("LIMIT"):
+367                parts = self._parse_csv(self._parse_number)
+368                if len(parts) == 1:
+369                    limit = parts[0]
+370                elif len(parts) == 2:
+371                    limit = parts[1]
+372                    offset = parts[0]
+373            return offset, limit
+374
+375        def _default_parse_set_item(self):
+376            return self._parse_set_item_assignment(kind=None)
+377
+378        def _parse_set_item_assignment(self, kind):
+379            if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"):
+380                return self._parse_set_transaction(global_=kind == "GLOBAL")
+381
+382            left = self._parse_primary() or self._parse_id_var()
+383            if not self._match(TokenType.EQ):
+384                self.raise_error("Expected =")
+385            right = self._parse_statement() or self._parse_id_var()
+386
+387            this = self.expression(
+388                exp.EQ,
+389                this=left,
+390                expression=right,
+391            )
+392
+393            return self.expression(
+394                exp.SetItem,
+395                this=this,
+396                kind=kind,
+397            )
+398
+399        def _parse_set_item_charset(self, kind):
+400            this = self._parse_string() or self._parse_id_var()
+401
+402            return self.expression(
+403                exp.SetItem,
+404                this=this,
+405                kind=kind,
+406            )
+407
+408        def _parse_set_item_names(self):
+409            charset = self._parse_string() or self._parse_id_var()
+410            if self._match_text_seq("COLLATE"):
+411                collate = self._parse_string() or self._parse_id_var()
+412            else:
+413                collate = None
+414            return self.expression(
+415                exp.SetItem,
+416                this=charset,
+417                collate=collate,
+418                kind="NAMES",
+419            )
+420
+421        def _parse_set_transaction(self, global_=False):
+422            self._match_text_seq("TRANSACTION")
+423            characteristics = self._parse_csv(
+424                lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
+425            )
+426            return self.expression(
+427                exp.SetItem,
+428                expressions=characteristics,
+429                kind="TRANSACTION",
+430                **{"global": global_},
+431            )
+432
+433    class Generator(generator.Generator):
+434        LOCKING_READS_SUPPORTED = True
+435        NULL_ORDERING_SUPPORTED = False
+436
+437        TRANSFORMS = {
+438            **generator.Generator.TRANSFORMS,  # type: ignore
+439            exp.CurrentDate: no_paren_current_date_sql,
+440            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+441            exp.ILike: no_ilike_sql,
+442            exp.TableSample: no_tablesample_sql,
+443            exp.TryCast: no_trycast_sql,
+444            exp.DateAdd: _date_add_sql("ADD"),
+445            exp.DateSub: _date_add_sql("SUB"),
+446            exp.DateTrunc: _date_trunc_sql,
+447            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
+448            exp.StrToDate: _str_to_date_sql,
+449            exp.StrToTime: _str_to_date_sql,
+450            exp.Trim: _trim_sql,
+451            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
+452            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
+453            exp.StrPosition: strposition_to_locate_sql,
+454        }
+455
+456        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
+457        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
+458        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
+459        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
+460        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
+461
+462        def show_sql(self, expression):
+463            this = f" {expression.name}"
+464            full = " FULL" if expression.args.get("full") else ""
+465            global_ = " GLOBAL" if expression.args.get("global") else ""
+466
+467            target = self.sql(expression, "target")
+468            target = f" {target}" if target else ""
+469            if expression.name in {"COLUMNS", "INDEX"}:
+470                target = f" FROM{target}"
+471            elif expression.name == "GRANTS":
+472                target = f" FOR{target}"
+473
+474            db = self._prefixed_sql("FROM", expression, "db")
+475
+476            like = self._prefixed_sql("LIKE", expression, "like")
+477            where = self.sql(expression, "where")
+478
+479            types = self.expressions(expression, key="types")
+480            types = f" {types}" if types else types
+481            query = self._prefixed_sql("FOR QUERY", expression, "query")
+482
+483            if expression.name == "PROFILE":
+484                offset = self._prefixed_sql("OFFSET", expression, "offset")
+485                limit = self._prefixed_sql("LIMIT", expression, "limit")
+486            else:
+487                offset = ""
+488                limit = self._oldstyle_limit_sql(expression)
+489
+490            log = self._prefixed_sql("IN", expression, "log")
+491            position = self._prefixed_sql("FROM", expression, "position")
+492
+493            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
+494
+495            if expression.name == "ENGINE":
+496                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
+497            else:
+498                mutex_or_status = ""
+499
+500            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
+501
+502        def _prefixed_sql(self, prefix, expression, arg):
+503            sql = self.sql(expression, arg)
+504            if not sql:
+505                return ""
+506            return f" {prefix} {sql}"
+507
+508        def _oldstyle_limit_sql(self, expression):
+509            limit = self.sql(expression, "limit")
+510            offset = self.sql(expression, "offset")
+511            if limit:
+512                limit_offset = f"{offset}, {limit}" if offset else limit
+513                return f" LIMIT {limit_offset}"
+514            return ""
+515
+516        def setitem_sql(self, expression):
+517            kind = self.sql(expression, "kind")
+518            kind = f"{kind} " if kind else ""
+519            this = self.sql(expression, "this")
+520            expressions = self.expressions(expression)
+521            collate = self.sql(expression, "collate")
+522            collate = f" COLLATE {collate}" if collate else ""
+523            global_ = "GLOBAL " if expression.args.get("global") else ""
+524            return f"{global_}{kind}{this}{expressions}{collate}"
+525
+526        def set_sql(self, expression):
+527            return f"SET {self.expressions(expression)}"
+
+ + + + +
+
+ + MySQL() + + +
+ + + + +
+ +
+
+ +
+ + class + MySQL.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
118    class Tokenizer(tokens.Tokenizer):
+119        QUOTES = ["'", '"']
+120        COMMENTS = ["--", "#", ("/*", "*/")]
+121        IDENTIFIERS = ["`"]
+122        STRING_ESCAPES = ["'", "\\"]
+123        BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")]
+124        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")]
+125
+126        KEYWORDS = {
+127            **tokens.Tokenizer.KEYWORDS,
+128            "MEDIUMTEXT": TokenType.MEDIUMTEXT,
+129            "LONGTEXT": TokenType.LONGTEXT,
+130            "MEDIUMBLOB": TokenType.MEDIUMBLOB,
+131            "LONGBLOB": TokenType.LONGBLOB,
+132            "START": TokenType.BEGIN,
+133            "SEPARATOR": TokenType.SEPARATOR,
+134            "_ARMSCII8": TokenType.INTRODUCER,
+135            "_ASCII": TokenType.INTRODUCER,
+136            "_BIG5": TokenType.INTRODUCER,
+137            "_BINARY": TokenType.INTRODUCER,
+138            "_CP1250": TokenType.INTRODUCER,
+139            "_CP1251": TokenType.INTRODUCER,
+140            "_CP1256": TokenType.INTRODUCER,
+141            "_CP1257": TokenType.INTRODUCER,
+142            "_CP850": TokenType.INTRODUCER,
+143            "_CP852": TokenType.INTRODUCER,
+144            "_CP866": TokenType.INTRODUCER,
+145            "_CP932": TokenType.INTRODUCER,
+146            "_DEC8": TokenType.INTRODUCER,
+147            "_EUCJPMS": TokenType.INTRODUCER,
+148            "_EUCKR": TokenType.INTRODUCER,
+149            "_GB18030": TokenType.INTRODUCER,
+150            "_GB2312": TokenType.INTRODUCER,
+151            "_GBK": TokenType.INTRODUCER,
+152            "_GEOSTD8": TokenType.INTRODUCER,
+153            "_GREEK": TokenType.INTRODUCER,
+154            "_HEBREW": TokenType.INTRODUCER,
+155            "_HP8": TokenType.INTRODUCER,
+156            "_KEYBCS2": TokenType.INTRODUCER,
+157            "_KOI8R": TokenType.INTRODUCER,
+158            "_KOI8U": TokenType.INTRODUCER,
+159            "_LATIN1": TokenType.INTRODUCER,
+160            "_LATIN2": TokenType.INTRODUCER,
+161            "_LATIN5": TokenType.INTRODUCER,
+162            "_LATIN7": TokenType.INTRODUCER,
+163            "_MACCE": TokenType.INTRODUCER,
+164            "_MACROMAN": TokenType.INTRODUCER,
+165            "_SJIS": TokenType.INTRODUCER,
+166            "_SWE7": TokenType.INTRODUCER,
+167            "_TIS620": TokenType.INTRODUCER,
+168            "_UCS2": TokenType.INTRODUCER,
+169            "_UJIS": TokenType.INTRODUCER,
+170            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
+171            "_UTF8": TokenType.INTRODUCER,
+172            "_UTF16": TokenType.INTRODUCER,
+173            "_UTF16LE": TokenType.INTRODUCER,
+174            "_UTF32": TokenType.INTRODUCER,
+175            "_UTF8MB3": TokenType.INTRODUCER,
+176            "_UTF8MB4": TokenType.INTRODUCER,
+177            "@@": TokenType.SESSION_PARAMETER,
+178        }
+179
+180        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SET, TokenType.SHOW}
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + MySQL.Parser(sqlglot.parser.Parser): + + + +
+ +
182    class Parser(parser.Parser):
+183        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS, TokenType.SCHEMA}  # type: ignore
+184
+185        FUNCTIONS = {
+186            **parser.Parser.FUNCTIONS,  # type: ignore
+187            "DATE_ADD": _date_add(exp.DateAdd),
+188            "DATE_SUB": _date_add(exp.DateSub),
+189            "STR_TO_DATE": _str_to_date,
+190            "LOCATE": locate_to_strposition,
+191            "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
+192            "LEFT": lambda args: exp.Substring(
+193                this=seq_get(args, 0), start=exp.Literal.number(1), length=seq_get(args, 1)
+194            ),
+195        }
+196
+197        FUNCTION_PARSERS = {
+198            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+199            "GROUP_CONCAT": lambda self: self.expression(
+200                exp.GroupConcat,
+201                this=self._parse_lambda(),
+202                separator=self._match(TokenType.SEPARATOR) and self._parse_field(),
+203            ),
+204        }
+205
+206        PROPERTY_PARSERS = {
+207            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
+208            "ENGINE": lambda self: self._parse_property_assignment(exp.EngineProperty),
+209        }
+210
+211        STATEMENT_PARSERS = {
+212            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+213            TokenType.SHOW: lambda self: self._parse_show(),
+214            TokenType.SET: lambda self: self._parse_set(),
+215        }
+216
+217        SHOW_PARSERS = {
+218            "BINARY LOGS": _show_parser("BINARY LOGS"),
+219            "MASTER LOGS": _show_parser("BINARY LOGS"),
+220            "BINLOG EVENTS": _show_parser("BINLOG EVENTS"),
+221            "CHARACTER SET": _show_parser("CHARACTER SET"),
+222            "CHARSET": _show_parser("CHARACTER SET"),
+223            "COLLATION": _show_parser("COLLATION"),
+224            "FULL COLUMNS": _show_parser("COLUMNS", target="FROM", full=True),
+225            "COLUMNS": _show_parser("COLUMNS", target="FROM"),
+226            "CREATE DATABASE": _show_parser("CREATE DATABASE", target=True),
+227            "CREATE EVENT": _show_parser("CREATE EVENT", target=True),
+228            "CREATE FUNCTION": _show_parser("CREATE FUNCTION", target=True),
+229            "CREATE PROCEDURE": _show_parser("CREATE PROCEDURE", target=True),
+230            "CREATE TABLE": _show_parser("CREATE TABLE", target=True),
+231            "CREATE TRIGGER": _show_parser("CREATE TRIGGER", target=True),
+232            "CREATE VIEW": _show_parser("CREATE VIEW", target=True),
+233            "DATABASES": _show_parser("DATABASES"),
+234            "ENGINE": _show_parser("ENGINE", target=True),
+235            "STORAGE ENGINES": _show_parser("ENGINES"),
+236            "ENGINES": _show_parser("ENGINES"),
+237            "ERRORS": _show_parser("ERRORS"),
+238            "EVENTS": _show_parser("EVENTS"),
+239            "FUNCTION CODE": _show_parser("FUNCTION CODE", target=True),
+240            "FUNCTION STATUS": _show_parser("FUNCTION STATUS"),
+241            "GRANTS": _show_parser("GRANTS", target="FOR"),
+242            "INDEX": _show_parser("INDEX", target="FROM"),
+243            "MASTER STATUS": _show_parser("MASTER STATUS"),
+244            "OPEN TABLES": _show_parser("OPEN TABLES"),
+245            "PLUGINS": _show_parser("PLUGINS"),
+246            "PROCEDURE CODE": _show_parser("PROCEDURE CODE", target=True),
+247            "PROCEDURE STATUS": _show_parser("PROCEDURE STATUS"),
+248            "PRIVILEGES": _show_parser("PRIVILEGES"),
+249            "FULL PROCESSLIST": _show_parser("PROCESSLIST", full=True),
+250            "PROCESSLIST": _show_parser("PROCESSLIST"),
+251            "PROFILE": _show_parser("PROFILE"),
+252            "PROFILES": _show_parser("PROFILES"),
+253            "RELAYLOG EVENTS": _show_parser("RELAYLOG EVENTS"),
+254            "REPLICAS": _show_parser("REPLICAS"),
+255            "SLAVE HOSTS": _show_parser("REPLICAS"),
+256            "REPLICA STATUS": _show_parser("REPLICA STATUS"),
+257            "SLAVE STATUS": _show_parser("REPLICA STATUS"),
+258            "GLOBAL STATUS": _show_parser("STATUS", global_=True),
+259            "SESSION STATUS": _show_parser("STATUS"),
+260            "STATUS": _show_parser("STATUS"),
+261            "TABLE STATUS": _show_parser("TABLE STATUS"),
+262            "FULL TABLES": _show_parser("TABLES", full=True),
+263            "TABLES": _show_parser("TABLES"),
+264            "TRIGGERS": _show_parser("TRIGGERS"),
+265            "GLOBAL VARIABLES": _show_parser("VARIABLES", global_=True),
+266            "SESSION VARIABLES": _show_parser("VARIABLES"),
+267            "VARIABLES": _show_parser("VARIABLES"),
+268            "WARNINGS": _show_parser("WARNINGS"),
+269        }
+270
+271        SET_PARSERS = {
+272            "GLOBAL": lambda self: self._parse_set_item_assignment("GLOBAL"),
+273            "PERSIST": lambda self: self._parse_set_item_assignment("PERSIST"),
+274            "PERSIST_ONLY": lambda self: self._parse_set_item_assignment("PERSIST_ONLY"),
+275            "SESSION": lambda self: self._parse_set_item_assignment("SESSION"),
+276            "LOCAL": lambda self: self._parse_set_item_assignment("LOCAL"),
+277            "CHARACTER SET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+278            "CHARSET": lambda self: self._parse_set_item_charset("CHARACTER SET"),
+279            "NAMES": lambda self: self._parse_set_item_names(),
+280            "TRANSACTION": lambda self: self._parse_set_transaction(),
+281        }
+282
+283        PROFILE_TYPES = {
+284            "ALL",
+285            "BLOCK IO",
+286            "CONTEXT SWITCHES",
+287            "CPU",
+288            "IPC",
+289            "MEMORY",
+290            "PAGE FAULTS",
+291            "SOURCE",
+292            "SWAPS",
+293        }
+294
+295        TRANSACTION_CHARACTERISTICS = {
+296            "ISOLATION LEVEL REPEATABLE READ",
+297            "ISOLATION LEVEL READ COMMITTED",
+298            "ISOLATION LEVEL READ UNCOMMITTED",
+299            "ISOLATION LEVEL SERIALIZABLE",
+300            "READ WRITE",
+301            "READ ONLY",
+302        }
+303
+304        def _parse_show_mysql(self, this, target=False, full=None, global_=None):
+305            if target:
+306                if isinstance(target, str):
+307                    self._match_text_seq(target)
+308                target_id = self._parse_id_var()
+309            else:
+310                target_id = None
+311
+312            log = self._parse_string() if self._match_text_seq("IN") else None
+313
+314            if this in {"BINLOG EVENTS", "RELAYLOG EVENTS"}:
+315                position = self._parse_number() if self._match_text_seq("FROM") else None
+316                db = None
+317            else:
+318                position = None
+319                db = self._parse_id_var() if self._match_text_seq("FROM") else None
+320
+321            channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None
+322
+323            like = self._parse_string() if self._match_text_seq("LIKE") else None
+324            where = self._parse_where()
+325
+326            if this == "PROFILE":
+327                types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES))
+328                query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None
+329                offset = self._parse_number() if self._match_text_seq("OFFSET") else None
+330                limit = self._parse_number() if self._match_text_seq("LIMIT") else None
+331            else:
+332                types, query = None, None
+333                offset, limit = self._parse_oldstyle_limit()
+334
+335            mutex = True if self._match_text_seq("MUTEX") else None
+336            mutex = False if self._match_text_seq("STATUS") else mutex
+337
+338            return self.expression(
+339                exp.Show,
+340                this=this,
+341                target=target_id,
+342                full=full,
+343                log=log,
+344                position=position,
+345                db=db,
+346                channel=channel,
+347                like=like,
+348                where=where,
+349                types=types,
+350                query=query,
+351                offset=offset,
+352                limit=limit,
+353                mutex=mutex,
+354                **{"global": global_},
+355            )
+356
+357        def _parse_var_from_options(self, options):
+358            for option in options:
+359                if self._match_text_seq(*option.split(" ")):
+360                    return exp.Var(this=option)
+361            return None
+362
+363        def _parse_oldstyle_limit(self):
+364            limit = None
+365            offset = None
+366            if self._match_text_seq("LIMIT"):
+367                parts = self._parse_csv(self._parse_number)
+368                if len(parts) == 1:
+369                    limit = parts[0]
+370                elif len(parts) == 2:
+371                    limit = parts[1]
+372                    offset = parts[0]
+373            return offset, limit
+374
+375        def _default_parse_set_item(self):
+376            return self._parse_set_item_assignment(kind=None)
+377
+378        def _parse_set_item_assignment(self, kind):
+379            if kind in {"GLOBAL", "SESSION"} and self._match_text_seq("TRANSACTION"):
+380                return self._parse_set_transaction(global_=kind == "GLOBAL")
+381
+382            left = self._parse_primary() or self._parse_id_var()
+383            if not self._match(TokenType.EQ):
+384                self.raise_error("Expected =")
+385            right = self._parse_statement() or self._parse_id_var()
+386
+387            this = self.expression(
+388                exp.EQ,
+389                this=left,
+390                expression=right,
+391            )
+392
+393            return self.expression(
+394                exp.SetItem,
+395                this=this,
+396                kind=kind,
+397            )
+398
+399        def _parse_set_item_charset(self, kind):
+400            this = self._parse_string() or self._parse_id_var()
+401
+402            return self.expression(
+403                exp.SetItem,
+404                this=this,
+405                kind=kind,
+406            )
+407
+408        def _parse_set_item_names(self):
+409            charset = self._parse_string() or self._parse_id_var()
+410            if self._match_text_seq("COLLATE"):
+411                collate = self._parse_string() or self._parse_id_var()
+412            else:
+413                collate = None
+414            return self.expression(
+415                exp.SetItem,
+416                this=charset,
+417                collate=collate,
+418                kind="NAMES",
+419            )
+420
+421        def _parse_set_transaction(self, global_=False):
+422            self._match_text_seq("TRANSACTION")
+423            characteristics = self._parse_csv(
+424                lambda: self._parse_var_from_options(self.TRANSACTION_CHARACTERISTICS)
+425            )
+426            return self.expression(
+427                exp.SetItem,
+428                expressions=characteristics,
+429                kind="TRANSACTION",
+430                **{"global": global_},
+431            )
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + MySQL.Generator(sqlglot.generator.Generator): + + + +
+ +
433    class Generator(generator.Generator):
+434        LOCKING_READS_SUPPORTED = True
+435        NULL_ORDERING_SUPPORTED = False
+436
+437        TRANSFORMS = {
+438            **generator.Generator.TRANSFORMS,  # type: ignore
+439            exp.CurrentDate: no_paren_current_date_sql,
+440            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+441            exp.ILike: no_ilike_sql,
+442            exp.TableSample: no_tablesample_sql,
+443            exp.TryCast: no_trycast_sql,
+444            exp.DateAdd: _date_add_sql("ADD"),
+445            exp.DateSub: _date_add_sql("SUB"),
+446            exp.DateTrunc: _date_trunc_sql,
+447            exp.GroupConcat: lambda self, e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
+448            exp.StrToDate: _str_to_date_sql,
+449            exp.StrToTime: _str_to_date_sql,
+450            exp.Trim: _trim_sql,
+451            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
+452            exp.NullSafeNEQ: lambda self, e: self.not_sql(self.binary(e, "<=>")),
+453            exp.StrPosition: strposition_to_locate_sql,
+454        }
+455
+456        TYPE_MAPPING = generator.Generator.TYPE_MAPPING.copy()
+457        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
+458        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
+459        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
+460        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
+461
+462        def show_sql(self, expression):
+463            this = f" {expression.name}"
+464            full = " FULL" if expression.args.get("full") else ""
+465            global_ = " GLOBAL" if expression.args.get("global") else ""
+466
+467            target = self.sql(expression, "target")
+468            target = f" {target}" if target else ""
+469            if expression.name in {"COLUMNS", "INDEX"}:
+470                target = f" FROM{target}"
+471            elif expression.name == "GRANTS":
+472                target = f" FOR{target}"
+473
+474            db = self._prefixed_sql("FROM", expression, "db")
+475
+476            like = self._prefixed_sql("LIKE", expression, "like")
+477            where = self.sql(expression, "where")
+478
+479            types = self.expressions(expression, key="types")
+480            types = f" {types}" if types else types
+481            query = self._prefixed_sql("FOR QUERY", expression, "query")
+482
+483            if expression.name == "PROFILE":
+484                offset = self._prefixed_sql("OFFSET", expression, "offset")
+485                limit = self._prefixed_sql("LIMIT", expression, "limit")
+486            else:
+487                offset = ""
+488                limit = self._oldstyle_limit_sql(expression)
+489
+490            log = self._prefixed_sql("IN", expression, "log")
+491            position = self._prefixed_sql("FROM", expression, "position")
+492
+493            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
+494
+495            if expression.name == "ENGINE":
+496                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
+497            else:
+498                mutex_or_status = ""
+499
+500            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
+501
+502        def _prefixed_sql(self, prefix, expression, arg):
+503            sql = self.sql(expression, arg)
+504            if not sql:
+505                return ""
+506            return f" {prefix} {sql}"
+507
+508        def _oldstyle_limit_sql(self, expression):
+509            limit = self.sql(expression, "limit")
+510            offset = self.sql(expression, "offset")
+511            if limit:
+512                limit_offset = f"{offset}, {limit}" if offset else limit
+513                return f" LIMIT {limit_offset}"
+514            return ""
+515
+516        def setitem_sql(self, expression):
+517            kind = self.sql(expression, "kind")
+518            kind = f"{kind} " if kind else ""
+519            this = self.sql(expression, "this")
+520            expressions = self.expressions(expression)
+521            collate = self.sql(expression, "collate")
+522            collate = f" COLLATE {collate}" if collate else ""
+523            global_ = "GLOBAL " if expression.args.get("global") else ""
+524            return f"{global_}{kind}{this}{expressions}{collate}"
+525
+526        def set_sql(self, expression):
+527            return f"SET {self.expressions(expression)}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + show_sql(self, expression): + + + +
+ +
462        def show_sql(self, expression):
+463            this = f" {expression.name}"
+464            full = " FULL" if expression.args.get("full") else ""
+465            global_ = " GLOBAL" if expression.args.get("global") else ""
+466
+467            target = self.sql(expression, "target")
+468            target = f" {target}" if target else ""
+469            if expression.name in {"COLUMNS", "INDEX"}:
+470                target = f" FROM{target}"
+471            elif expression.name == "GRANTS":
+472                target = f" FOR{target}"
+473
+474            db = self._prefixed_sql("FROM", expression, "db")
+475
+476            like = self._prefixed_sql("LIKE", expression, "like")
+477            where = self.sql(expression, "where")
+478
+479            types = self.expressions(expression, key="types")
+480            types = f" {types}" if types else types
+481            query = self._prefixed_sql("FOR QUERY", expression, "query")
+482
+483            if expression.name == "PROFILE":
+484                offset = self._prefixed_sql("OFFSET", expression, "offset")
+485                limit = self._prefixed_sql("LIMIT", expression, "limit")
+486            else:
+487                offset = ""
+488                limit = self._oldstyle_limit_sql(expression)
+489
+490            log = self._prefixed_sql("IN", expression, "log")
+491            position = self._prefixed_sql("FROM", expression, "position")
+492
+493            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
+494
+495            if expression.name == "ENGINE":
+496                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
+497            else:
+498                mutex_or_status = ""
+499
+500            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
+
+ + + + +
+
+ +
+ + def + setitem_sql(self, expression): + + + +
+ +
516        def setitem_sql(self, expression):
+517            kind = self.sql(expression, "kind")
+518            kind = f"{kind} " if kind else ""
+519            this = self.sql(expression, "this")
+520            expressions = self.expressions(expression)
+521            collate = self.sql(expression, "collate")
+522            collate = f" COLLATE {collate}" if collate else ""
+523            global_ = "GLOBAL " if expression.args.get("global") else ""
+524            return f"{global_}{kind}{this}{expressions}{collate}"
+
+ + + + +
+
+ +
+ + def + set_sql(self, expression): + + + +
+ +
526        def set_sql(self, expression):
+527            return f"SET {self.expressions(expression)}"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/oracle.html b/docs/sqlglot/dialects/oracle.html new file mode 100644 index 0000000..4934245 --- /dev/null +++ b/docs/sqlglot/dialects/oracle.html @@ -0,0 +1,1052 @@ + + + + + + + sqlglot.dialects.oracle API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.oracle

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens, transforms
+  4from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
+  5from sqlglot.helper import csv
+  6from sqlglot.tokens import TokenType
+  7
+  8
+  9def _limit_sql(self, expression):
+ 10    return self.fetch_sql(exp.Fetch(direction="FIRST", count=expression.expression))
+ 11
+ 12
+ 13class Oracle(Dialect):
+ 14    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
+ 15    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
+ 16    time_mapping = {
+ 17        "AM": "%p",  # Meridian indicator with or without periods
+ 18        "A.M.": "%p",  # Meridian indicator with or without periods
+ 19        "PM": "%p",  # Meridian indicator with or without periods
+ 20        "P.M.": "%p",  # Meridian indicator with or without periods
+ 21        "D": "%u",  # Day of week (1-7)
+ 22        "DAY": "%A",  # name of day
+ 23        "DD": "%d",  # day of month (1-31)
+ 24        "DDD": "%j",  # day of year (1-366)
+ 25        "DY": "%a",  # abbreviated name of day
+ 26        "HH": "%I",  # Hour of day (1-12)
+ 27        "HH12": "%I",  # alias for HH
+ 28        "HH24": "%H",  # Hour of day (0-23)
+ 29        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
+ 30        "MI": "%M",  # Minute (0-59)
+ 31        "MM": "%m",  # Month (01-12; January = 01)
+ 32        "MON": "%b",  # Abbreviated name of month
+ 33        "MONTH": "%B",  # Name of month
+ 34        "SS": "%S",  # Second (0-59)
+ 35        "WW": "%W",  # Week of year (1-53)
+ 36        "YY": "%y",  # 15
+ 37        "YYYY": "%Y",  # 2015
+ 38    }
+ 39
+ 40    class Parser(parser.Parser):
+ 41        FUNCTIONS = {
+ 42            **parser.Parser.FUNCTIONS,  # type: ignore
+ 43            "DECODE": exp.Matches.from_arg_list,
+ 44        }
+ 45
+ 46    class Generator(generator.Generator):
+ 47        LOCKING_READS_SUPPORTED = True
+ 48
+ 49        TYPE_MAPPING = {
+ 50            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 51            exp.DataType.Type.TINYINT: "NUMBER",
+ 52            exp.DataType.Type.SMALLINT: "NUMBER",
+ 53            exp.DataType.Type.INT: "NUMBER",
+ 54            exp.DataType.Type.BIGINT: "NUMBER",
+ 55            exp.DataType.Type.DECIMAL: "NUMBER",
+ 56            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+ 57            exp.DataType.Type.VARCHAR: "VARCHAR2",
+ 58            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
+ 59            exp.DataType.Type.TEXT: "CLOB",
+ 60            exp.DataType.Type.BINARY: "BLOB",
+ 61            exp.DataType.Type.VARBINARY: "BLOB",
+ 62        }
+ 63
+ 64        TRANSFORMS = {
+ 65            **generator.Generator.TRANSFORMS,  # type: ignore
+ 66            **transforms.UNALIAS_GROUP,  # type: ignore
+ 67            exp.ILike: no_ilike_sql,
+ 68            exp.Limit: _limit_sql,
+ 69            exp.Trim: trim_sql,
+ 70            exp.Matches: rename_func("DECODE"),
+ 71            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+ 72            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+ 73            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
+ 74            exp.Substring: rename_func("SUBSTR"),
+ 75        }
+ 76
+ 77        def query_modifiers(self, expression, *sqls):
+ 78            return csv(
+ 79                *sqls,
+ 80                *[self.sql(sql) for sql in expression.args.get("joins") or []],
+ 81                self.sql(expression, "match"),
+ 82                *[self.sql(sql) for sql in expression.args.get("laterals") or []],
+ 83                self.sql(expression, "where"),
+ 84                self.sql(expression, "group"),
+ 85                self.sql(expression, "having"),
+ 86                self.sql(expression, "qualify"),
+ 87                self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+ 88                if expression.args.get("windows")
+ 89                else "",
+ 90                self.sql(expression, "distribute"),
+ 91                self.sql(expression, "sort"),
+ 92                self.sql(expression, "cluster"),
+ 93                self.sql(expression, "order"),
+ 94                self.sql(expression, "offset"),  # offset before limit in oracle
+ 95                self.sql(expression, "limit"),
+ 96                self.sql(expression, "lock"),
+ 97                sep="",
+ 98            )
+ 99
+100        def offset_sql(self, expression):
+101            return f"{super().offset_sql(expression)} ROWS"
+102
+103        def table_sql(self, expression):
+104            return super().table_sql(expression, sep=" ")
+105
+106    class Tokenizer(tokens.Tokenizer):
+107        KEYWORDS = {
+108            **tokens.Tokenizer.KEYWORDS,
+109            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+110            "MINUS": TokenType.EXCEPT,
+111            "START": TokenType.BEGIN,
+112            "TOP": TokenType.TOP,
+113            "VARCHAR2": TokenType.VARCHAR,
+114            "NVARCHAR2": TokenType.NVARCHAR,
+115        }
+
+ + +
+
+ +
+ + class + Oracle(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 14class Oracle(Dialect):
+ 15    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
+ 16    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
+ 17    time_mapping = {
+ 18        "AM": "%p",  # Meridian indicator with or without periods
+ 19        "A.M.": "%p",  # Meridian indicator with or without periods
+ 20        "PM": "%p",  # Meridian indicator with or without periods
+ 21        "P.M.": "%p",  # Meridian indicator with or without periods
+ 22        "D": "%u",  # Day of week (1-7)
+ 23        "DAY": "%A",  # name of day
+ 24        "DD": "%d",  # day of month (1-31)
+ 25        "DDD": "%j",  # day of year (1-366)
+ 26        "DY": "%a",  # abbreviated name of day
+ 27        "HH": "%I",  # Hour of day (1-12)
+ 28        "HH12": "%I",  # alias for HH
+ 29        "HH24": "%H",  # Hour of day (0-23)
+ 30        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
+ 31        "MI": "%M",  # Minute (0-59)
+ 32        "MM": "%m",  # Month (01-12; January = 01)
+ 33        "MON": "%b",  # Abbreviated name of month
+ 34        "MONTH": "%B",  # Name of month
+ 35        "SS": "%S",  # Second (0-59)
+ 36        "WW": "%W",  # Week of year (1-53)
+ 37        "YY": "%y",  # 15
+ 38        "YYYY": "%Y",  # 2015
+ 39    }
+ 40
+ 41    class Parser(parser.Parser):
+ 42        FUNCTIONS = {
+ 43            **parser.Parser.FUNCTIONS,  # type: ignore
+ 44            "DECODE": exp.Matches.from_arg_list,
+ 45        }
+ 46
+ 47    class Generator(generator.Generator):
+ 48        LOCKING_READS_SUPPORTED = True
+ 49
+ 50        TYPE_MAPPING = {
+ 51            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 52            exp.DataType.Type.TINYINT: "NUMBER",
+ 53            exp.DataType.Type.SMALLINT: "NUMBER",
+ 54            exp.DataType.Type.INT: "NUMBER",
+ 55            exp.DataType.Type.BIGINT: "NUMBER",
+ 56            exp.DataType.Type.DECIMAL: "NUMBER",
+ 57            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+ 58            exp.DataType.Type.VARCHAR: "VARCHAR2",
+ 59            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
+ 60            exp.DataType.Type.TEXT: "CLOB",
+ 61            exp.DataType.Type.BINARY: "BLOB",
+ 62            exp.DataType.Type.VARBINARY: "BLOB",
+ 63        }
+ 64
+ 65        TRANSFORMS = {
+ 66            **generator.Generator.TRANSFORMS,  # type: ignore
+ 67            **transforms.UNALIAS_GROUP,  # type: ignore
+ 68            exp.ILike: no_ilike_sql,
+ 69            exp.Limit: _limit_sql,
+ 70            exp.Trim: trim_sql,
+ 71            exp.Matches: rename_func("DECODE"),
+ 72            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+ 73            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+ 74            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
+ 75            exp.Substring: rename_func("SUBSTR"),
+ 76        }
+ 77
+ 78        def query_modifiers(self, expression, *sqls):
+ 79            return csv(
+ 80                *sqls,
+ 81                *[self.sql(sql) for sql in expression.args.get("joins") or []],
+ 82                self.sql(expression, "match"),
+ 83                *[self.sql(sql) for sql in expression.args.get("laterals") or []],
+ 84                self.sql(expression, "where"),
+ 85                self.sql(expression, "group"),
+ 86                self.sql(expression, "having"),
+ 87                self.sql(expression, "qualify"),
+ 88                self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+ 89                if expression.args.get("windows")
+ 90                else "",
+ 91                self.sql(expression, "distribute"),
+ 92                self.sql(expression, "sort"),
+ 93                self.sql(expression, "cluster"),
+ 94                self.sql(expression, "order"),
+ 95                self.sql(expression, "offset"),  # offset before limit in oracle
+ 96                self.sql(expression, "limit"),
+ 97                self.sql(expression, "lock"),
+ 98                sep="",
+ 99            )
+100
+101        def offset_sql(self, expression):
+102            return f"{super().offset_sql(expression)} ROWS"
+103
+104        def table_sql(self, expression):
+105            return super().table_sql(expression, sep=" ")
+106
+107    class Tokenizer(tokens.Tokenizer):
+108        KEYWORDS = {
+109            **tokens.Tokenizer.KEYWORDS,
+110            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+111            "MINUS": TokenType.EXCEPT,
+112            "START": TokenType.BEGIN,
+113            "TOP": TokenType.TOP,
+114            "VARCHAR2": TokenType.VARCHAR,
+115            "NVARCHAR2": TokenType.NVARCHAR,
+116        }
+
+ + + + +
+
+ + Oracle() + + +
+ + + + +
+ +
+
+ +
+ + class + Oracle.Parser(sqlglot.parser.Parser): + + + +
+ +
41    class Parser(parser.Parser):
+42        FUNCTIONS = {
+43            **parser.Parser.FUNCTIONS,  # type: ignore
+44            "DECODE": exp.Matches.from_arg_list,
+45        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Oracle.Generator(sqlglot.generator.Generator): + + + +
+ +
 47    class Generator(generator.Generator):
+ 48        LOCKING_READS_SUPPORTED = True
+ 49
+ 50        TYPE_MAPPING = {
+ 51            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 52            exp.DataType.Type.TINYINT: "NUMBER",
+ 53            exp.DataType.Type.SMALLINT: "NUMBER",
+ 54            exp.DataType.Type.INT: "NUMBER",
+ 55            exp.DataType.Type.BIGINT: "NUMBER",
+ 56            exp.DataType.Type.DECIMAL: "NUMBER",
+ 57            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+ 58            exp.DataType.Type.VARCHAR: "VARCHAR2",
+ 59            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
+ 60            exp.DataType.Type.TEXT: "CLOB",
+ 61            exp.DataType.Type.BINARY: "BLOB",
+ 62            exp.DataType.Type.VARBINARY: "BLOB",
+ 63        }
+ 64
+ 65        TRANSFORMS = {
+ 66            **generator.Generator.TRANSFORMS,  # type: ignore
+ 67            **transforms.UNALIAS_GROUP,  # type: ignore
+ 68            exp.ILike: no_ilike_sql,
+ 69            exp.Limit: _limit_sql,
+ 70            exp.Trim: trim_sql,
+ 71            exp.Matches: rename_func("DECODE"),
+ 72            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+ 73            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+ 74            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
+ 75            exp.Substring: rename_func("SUBSTR"),
+ 76        }
+ 77
+ 78        def query_modifiers(self, expression, *sqls):
+ 79            return csv(
+ 80                *sqls,
+ 81                *[self.sql(sql) for sql in expression.args.get("joins") or []],
+ 82                self.sql(expression, "match"),
+ 83                *[self.sql(sql) for sql in expression.args.get("laterals") or []],
+ 84                self.sql(expression, "where"),
+ 85                self.sql(expression, "group"),
+ 86                self.sql(expression, "having"),
+ 87                self.sql(expression, "qualify"),
+ 88                self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+ 89                if expression.args.get("windows")
+ 90                else "",
+ 91                self.sql(expression, "distribute"),
+ 92                self.sql(expression, "sort"),
+ 93                self.sql(expression, "cluster"),
+ 94                self.sql(expression, "order"),
+ 95                self.sql(expression, "offset"),  # offset before limit in oracle
+ 96                self.sql(expression, "limit"),
+ 97                self.sql(expression, "lock"),
+ 98                sep="",
+ 99            )
+100
+101        def offset_sql(self, expression):
+102            return f"{super().offset_sql(expression)} ROWS"
+103
+104        def table_sql(self, expression):
+105            return super().table_sql(expression, sep=" ")
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + query_modifiers(self, expression, *sqls): + + + +
+ +
78        def query_modifiers(self, expression, *sqls):
+79            return csv(
+80                *sqls,
+81                *[self.sql(sql) for sql in expression.args.get("joins") or []],
+82                self.sql(expression, "match"),
+83                *[self.sql(sql) for sql in expression.args.get("laterals") or []],
+84                self.sql(expression, "where"),
+85                self.sql(expression, "group"),
+86                self.sql(expression, "having"),
+87                self.sql(expression, "qualify"),
+88                self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+89                if expression.args.get("windows")
+90                else "",
+91                self.sql(expression, "distribute"),
+92                self.sql(expression, "sort"),
+93                self.sql(expression, "cluster"),
+94                self.sql(expression, "order"),
+95                self.sql(expression, "offset"),  # offset before limit in oracle
+96                self.sql(expression, "limit"),
+97                self.sql(expression, "lock"),
+98                sep="",
+99            )
+
+ + + + +
+
+ +
+ + def + offset_sql(self, expression): + + + +
+ +
101        def offset_sql(self, expression):
+102            return f"{super().offset_sql(expression)} ROWS"
+
+ + + + +
+
+ +
+ + def + table_sql(self, expression): + + + +
+ +
104        def table_sql(self, expression):
+105            return super().table_sql(expression, sep=" ")
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ +
+ + class + Oracle.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
107    class Tokenizer(tokens.Tokenizer):
+108        KEYWORDS = {
+109            **tokens.Tokenizer.KEYWORDS,
+110            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+111            "MINUS": TokenType.EXCEPT,
+112            "START": TokenType.BEGIN,
+113            "TOP": TokenType.TOP,
+114            "VARCHAR2": TokenType.VARCHAR,
+115            "NVARCHAR2": TokenType.NVARCHAR,
+116        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/postgres.html b/docs/sqlglot/dialects/postgres.html new file mode 100644 index 0000000..4dcc846 --- /dev/null +++ b/docs/sqlglot/dialects/postgres.html @@ -0,0 +1,1245 @@ + + + + + + + sqlglot.dialects.postgres API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.postgres

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    arrow_json_extract_scalar_sql,
+  7    arrow_json_extract_sql,
+  8    format_time_lambda,
+  9    no_paren_current_date_sql,
+ 10    no_tablesample_sql,
+ 11    no_trycast_sql,
+ 12    rename_func,
+ 13    str_position_sql,
+ 14    trim_sql,
+ 15)
+ 16from sqlglot.helper import seq_get
+ 17from sqlglot.tokens import TokenType
+ 18from sqlglot.transforms import delegate, preprocess
+ 19
+ 20DATE_DIFF_FACTOR = {
+ 21    "MICROSECOND": " * 1000000",
+ 22    "MILLISECOND": " * 1000",
+ 23    "SECOND": "",
+ 24    "MINUTE": " / 60",
+ 25    "HOUR": " / 3600",
+ 26    "DAY": " / 86400",
+ 27}
+ 28
+ 29
+ 30def _date_add_sql(kind):
+ 31    def func(self, expression):
+ 32        from sqlglot.optimizer.simplify import simplify
+ 33
+ 34        this = self.sql(expression, "this")
+ 35        unit = self.sql(expression, "unit")
+ 36        expression = simplify(expression.args["expression"])
+ 37
+ 38        if not isinstance(expression, exp.Literal):
+ 39            self.unsupported("Cannot add non literal")
+ 40
+ 41        expression = expression.copy()
+ 42        expression.args["is_string"] = True
+ 43        return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}"
+ 44
+ 45    return func
+ 46
+ 47
+ 48def _date_diff_sql(self, expression):
+ 49    unit = expression.text("unit").upper()
+ 50    factor = DATE_DIFF_FACTOR.get(unit)
+ 51
+ 52    end = f"CAST({expression.this} AS TIMESTAMP)"
+ 53    start = f"CAST({expression.expression} AS TIMESTAMP)"
+ 54
+ 55    if factor is not None:
+ 56        return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)"
+ 57
+ 58    age = f"AGE({end}, {start})"
+ 59
+ 60    if unit == "WEEK":
+ 61        extract = f"EXTRACT(year FROM {age}) * 48 + EXTRACT(month FROM {age}) * 4 + EXTRACT(day FROM {age}) / 7"
+ 62    elif unit == "MONTH":
+ 63        extract = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})"
+ 64    elif unit == "QUARTER":
+ 65        extract = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3"
+ 66    elif unit == "YEAR":
+ 67        extract = f"EXTRACT(year FROM {age})"
+ 68    else:
+ 69        self.unsupported(f"Unsupported DATEDIFF unit {unit}")
+ 70
+ 71    return f"CAST({extract} AS BIGINT)"
+ 72
+ 73
+ 74def _substring_sql(self, expression):
+ 75    this = self.sql(expression, "this")
+ 76    start = self.sql(expression, "start")
+ 77    length = self.sql(expression, "length")
+ 78
+ 79    from_part = f" FROM {start}" if start else ""
+ 80    for_part = f" FOR {length}" if length else ""
+ 81
+ 82    return f"SUBSTRING({this}{from_part}{for_part})"
+ 83
+ 84
+ 85def _string_agg_sql(self, expression):
+ 86    expression = expression.copy()
+ 87    separator = expression.args.get("separator") or exp.Literal.string(",")
+ 88
+ 89    order = ""
+ 90    this = expression.this
+ 91    if isinstance(this, exp.Order):
+ 92        if this.this:
+ 93            this = this.this
+ 94            this.pop()
+ 95        order = self.sql(expression.this)  # Order has a leading space
+ 96
+ 97    return f"STRING_AGG({self.format_args(this, separator)}{order})"
+ 98
+ 99
+100def _datatype_sql(self, expression):
+101    if expression.this == exp.DataType.Type.ARRAY:
+102        return f"{self.expressions(expression, flat=True)}[]"
+103    return self.datatype_sql(expression)
+104
+105
+106def _auto_increment_to_serial(expression):
+107    auto = expression.find(exp.AutoIncrementColumnConstraint)
+108
+109    if auto:
+110        expression = expression.copy()
+111        expression.args["constraints"].remove(auto.parent)
+112        kind = expression.args["kind"]
+113
+114        if kind.this == exp.DataType.Type.INT:
+115            kind.replace(exp.DataType(this=exp.DataType.Type.SERIAL))
+116        elif kind.this == exp.DataType.Type.SMALLINT:
+117            kind.replace(exp.DataType(this=exp.DataType.Type.SMALLSERIAL))
+118        elif kind.this == exp.DataType.Type.BIGINT:
+119            kind.replace(exp.DataType(this=exp.DataType.Type.BIGSERIAL))
+120
+121    return expression
+122
+123
+124def _serial_to_generated(expression):
+125    kind = expression.args["kind"]
+126
+127    if kind.this == exp.DataType.Type.SERIAL:
+128        data_type = exp.DataType(this=exp.DataType.Type.INT)
+129    elif kind.this == exp.DataType.Type.SMALLSERIAL:
+130        data_type = exp.DataType(this=exp.DataType.Type.SMALLINT)
+131    elif kind.this == exp.DataType.Type.BIGSERIAL:
+132        data_type = exp.DataType(this=exp.DataType.Type.BIGINT)
+133    else:
+134        data_type = None
+135
+136    if data_type:
+137        expression = expression.copy()
+138        expression.args["kind"].replace(data_type)
+139        constraints = expression.args["constraints"]
+140        generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False))
+141        notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint())
+142        if notnull not in constraints:
+143            constraints.insert(0, notnull)
+144        if generated not in constraints:
+145            constraints.insert(0, generated)
+146
+147    return expression
+148
+149
+150def _generate_series(args):
+151    # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day
+152    step = seq_get(args, 2)
+153
+154    if step is None:
+155        # Postgres allows calls with just two arguments -- the "step" argument defaults to 1
+156        return exp.GenerateSeries.from_arg_list(args)
+157
+158    if step.is_string:
+159        args[2] = exp.to_interval(step.this)
+160    elif isinstance(step, exp.Interval) and not step.args.get("unit"):
+161        args[2] = exp.to_interval(step.this.this)
+162
+163    return exp.GenerateSeries.from_arg_list(args)
+164
+165
+166def _to_timestamp(args):
+167    # TO_TIMESTAMP accepts either a single double argument or (text, text)
+168    if len(args) == 1:
+169        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE
+170        return exp.UnixToTime.from_arg_list(args)
+171    # https://www.postgresql.org/docs/current/functions-formatting.html
+172    return format_time_lambda(exp.StrToTime, "postgres")(args)
+173
+174
+175class Postgres(Dialect):
+176    null_ordering = "nulls_are_large"
+177    time_format = "'YYYY-MM-DD HH24:MI:SS'"
+178    time_mapping = {
+179        "AM": "%p",
+180        "PM": "%p",
+181        "D": "%u",  # 1-based day of week
+182        "DD": "%d",  # day of month
+183        "DDD": "%j",  # zero padded day of year
+184        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
+185        "FMDDD": "%-j",  # day of year
+186        "FMHH12": "%-I",  # 9
+187        "FMHH24": "%-H",  # 9
+188        "FMMI": "%-M",  # Minute
+189        "FMMM": "%-m",  # 1
+190        "FMSS": "%-S",  # Second
+191        "HH12": "%I",  # 09
+192        "HH24": "%H",  # 09
+193        "MI": "%M",  # zero padded minute
+194        "MM": "%m",  # 01
+195        "OF": "%z",  # utc offset
+196        "SS": "%S",  # zero padded second
+197        "TMDay": "%A",  # TM is locale dependent
+198        "TMDy": "%a",
+199        "TMMon": "%b",  # Sep
+200        "TMMonth": "%B",  # September
+201        "TZ": "%Z",  # uppercase timezone name
+202        "US": "%f",  # zero padded microsecond
+203        "WW": "%U",  # 1-based week of year
+204        "YY": "%y",  # 15
+205        "YYYY": "%Y",  # 2015
+206    }
+207
+208    class Tokenizer(tokens.Tokenizer):
+209        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+210        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+211        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+212
+213        KEYWORDS = {
+214            **tokens.Tokenizer.KEYWORDS,
+215            "~~": TokenType.LIKE,
+216            "~~*": TokenType.ILIKE,
+217            "~*": TokenType.IRLIKE,
+218            "~": TokenType.RLIKE,
+219            "BEGIN": TokenType.COMMAND,
+220            "BEGIN TRANSACTION": TokenType.BEGIN,
+221            "BIGSERIAL": TokenType.BIGSERIAL,
+222            "CHARACTER VARYING": TokenType.VARCHAR,
+223            "COMMENT ON": TokenType.COMMAND,
+224            "DECLARE": TokenType.COMMAND,
+225            "DO": TokenType.COMMAND,
+226            "GRANT": TokenType.COMMAND,
+227            "HSTORE": TokenType.HSTORE,
+228            "JSONB": TokenType.JSONB,
+229            "REFRESH": TokenType.COMMAND,
+230            "REINDEX": TokenType.COMMAND,
+231            "RESET": TokenType.COMMAND,
+232            "REVOKE": TokenType.COMMAND,
+233            "SERIAL": TokenType.SERIAL,
+234            "SMALLSERIAL": TokenType.SMALLSERIAL,
+235            "TEMP": TokenType.TEMPORARY,
+236            "UUID": TokenType.UUID,
+237            "CSTRING": TokenType.PSEUDO_TYPE,
+238        }
+239        QUOTES = ["'", "$$"]
+240        SINGLE_TOKENS = {
+241            **tokens.Tokenizer.SINGLE_TOKENS,
+242            "$": TokenType.PARAMETER,
+243        }
+244
+245    class Parser(parser.Parser):
+246        STRICT_CAST = False
+247
+248        FUNCTIONS = {
+249            **parser.Parser.FUNCTIONS,  # type: ignore
+250            "NOW": exp.CurrentTimestamp.from_arg_list,
+251            "TO_TIMESTAMP": _to_timestamp,
+252            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+253            "GENERATE_SERIES": _generate_series,
+254        }
+255
+256        BITWISE = {
+257            **parser.Parser.BITWISE,  # type: ignore
+258            TokenType.HASH: exp.BitwiseXor,
+259        }
+260
+261        FACTOR = {
+262            **parser.Parser.FACTOR,  # type: ignore
+263            TokenType.CARET: exp.Pow,
+264        }
+265
+266    class Generator(generator.Generator):
+267        LOCKING_READS_SUPPORTED = True
+268
+269        TYPE_MAPPING = {
+270            **generator.Generator.TYPE_MAPPING,  # type: ignore
+271            exp.DataType.Type.TINYINT: "SMALLINT",
+272            exp.DataType.Type.FLOAT: "REAL",
+273            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+274            exp.DataType.Type.BINARY: "BYTEA",
+275            exp.DataType.Type.VARBINARY: "BYTEA",
+276            exp.DataType.Type.DATETIME: "TIMESTAMP",
+277        }
+278
+279        TRANSFORMS = {
+280            **generator.Generator.TRANSFORMS,  # type: ignore
+281            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+282            exp.ColumnDef: preprocess(
+283                [
+284                    _auto_increment_to_serial,
+285                    _serial_to_generated,
+286                ],
+287                delegate("columndef_sql"),
+288            ),
+289            exp.JSONExtract: arrow_json_extract_sql,
+290            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+291            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+292            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+293            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+294            exp.Pow: lambda self, e: self.binary(e, "^"),
+295            exp.CurrentDate: no_paren_current_date_sql,
+296            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+297            exp.DateAdd: _date_add_sql("+"),
+298            exp.DateSub: _date_add_sql("-"),
+299            exp.DateDiff: _date_diff_sql,
+300            exp.LogicalOr: rename_func("BOOL_OR"),
+301            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+302            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+303            exp.StrPosition: str_position_sql,
+304            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+305            exp.Substring: _substring_sql,
+306            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+307            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+308            exp.TableSample: no_tablesample_sql,
+309            exp.Trim: trim_sql,
+310            exp.TryCast: no_trycast_sql,
+311            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+312            exp.DataType: _datatype_sql,
+313            exp.GroupConcat: _string_agg_sql,
+314            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+315            if isinstance(seq_get(e.expressions, 0), exp.Select)
+316            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+317        }
+
+ + +
+
+ +
+ + class + Postgres(sqlglot.dialects.dialect.Dialect): + + + +
+ +
176class Postgres(Dialect):
+177    null_ordering = "nulls_are_large"
+178    time_format = "'YYYY-MM-DD HH24:MI:SS'"
+179    time_mapping = {
+180        "AM": "%p",
+181        "PM": "%p",
+182        "D": "%u",  # 1-based day of week
+183        "DD": "%d",  # day of month
+184        "DDD": "%j",  # zero padded day of year
+185        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
+186        "FMDDD": "%-j",  # day of year
+187        "FMHH12": "%-I",  # 9
+188        "FMHH24": "%-H",  # 9
+189        "FMMI": "%-M",  # Minute
+190        "FMMM": "%-m",  # 1
+191        "FMSS": "%-S",  # Second
+192        "HH12": "%I",  # 09
+193        "HH24": "%H",  # 09
+194        "MI": "%M",  # zero padded minute
+195        "MM": "%m",  # 01
+196        "OF": "%z",  # utc offset
+197        "SS": "%S",  # zero padded second
+198        "TMDay": "%A",  # TM is locale dependent
+199        "TMDy": "%a",
+200        "TMMon": "%b",  # Sep
+201        "TMMonth": "%B",  # September
+202        "TZ": "%Z",  # uppercase timezone name
+203        "US": "%f",  # zero padded microsecond
+204        "WW": "%U",  # 1-based week of year
+205        "YY": "%y",  # 15
+206        "YYYY": "%Y",  # 2015
+207    }
+208
+209    class Tokenizer(tokens.Tokenizer):
+210        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+211        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+212        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+213
+214        KEYWORDS = {
+215            **tokens.Tokenizer.KEYWORDS,
+216            "~~": TokenType.LIKE,
+217            "~~*": TokenType.ILIKE,
+218            "~*": TokenType.IRLIKE,
+219            "~": TokenType.RLIKE,
+220            "BEGIN": TokenType.COMMAND,
+221            "BEGIN TRANSACTION": TokenType.BEGIN,
+222            "BIGSERIAL": TokenType.BIGSERIAL,
+223            "CHARACTER VARYING": TokenType.VARCHAR,
+224            "COMMENT ON": TokenType.COMMAND,
+225            "DECLARE": TokenType.COMMAND,
+226            "DO": TokenType.COMMAND,
+227            "GRANT": TokenType.COMMAND,
+228            "HSTORE": TokenType.HSTORE,
+229            "JSONB": TokenType.JSONB,
+230            "REFRESH": TokenType.COMMAND,
+231            "REINDEX": TokenType.COMMAND,
+232            "RESET": TokenType.COMMAND,
+233            "REVOKE": TokenType.COMMAND,
+234            "SERIAL": TokenType.SERIAL,
+235            "SMALLSERIAL": TokenType.SMALLSERIAL,
+236            "TEMP": TokenType.TEMPORARY,
+237            "UUID": TokenType.UUID,
+238            "CSTRING": TokenType.PSEUDO_TYPE,
+239        }
+240        QUOTES = ["'", "$$"]
+241        SINGLE_TOKENS = {
+242            **tokens.Tokenizer.SINGLE_TOKENS,
+243            "$": TokenType.PARAMETER,
+244        }
+245
+246    class Parser(parser.Parser):
+247        STRICT_CAST = False
+248
+249        FUNCTIONS = {
+250            **parser.Parser.FUNCTIONS,  # type: ignore
+251            "NOW": exp.CurrentTimestamp.from_arg_list,
+252            "TO_TIMESTAMP": _to_timestamp,
+253            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+254            "GENERATE_SERIES": _generate_series,
+255        }
+256
+257        BITWISE = {
+258            **parser.Parser.BITWISE,  # type: ignore
+259            TokenType.HASH: exp.BitwiseXor,
+260        }
+261
+262        FACTOR = {
+263            **parser.Parser.FACTOR,  # type: ignore
+264            TokenType.CARET: exp.Pow,
+265        }
+266
+267    class Generator(generator.Generator):
+268        LOCKING_READS_SUPPORTED = True
+269
+270        TYPE_MAPPING = {
+271            **generator.Generator.TYPE_MAPPING,  # type: ignore
+272            exp.DataType.Type.TINYINT: "SMALLINT",
+273            exp.DataType.Type.FLOAT: "REAL",
+274            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+275            exp.DataType.Type.BINARY: "BYTEA",
+276            exp.DataType.Type.VARBINARY: "BYTEA",
+277            exp.DataType.Type.DATETIME: "TIMESTAMP",
+278        }
+279
+280        TRANSFORMS = {
+281            **generator.Generator.TRANSFORMS,  # type: ignore
+282            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+283            exp.ColumnDef: preprocess(
+284                [
+285                    _auto_increment_to_serial,
+286                    _serial_to_generated,
+287                ],
+288                delegate("columndef_sql"),
+289            ),
+290            exp.JSONExtract: arrow_json_extract_sql,
+291            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+292            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+293            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+294            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+295            exp.Pow: lambda self, e: self.binary(e, "^"),
+296            exp.CurrentDate: no_paren_current_date_sql,
+297            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+298            exp.DateAdd: _date_add_sql("+"),
+299            exp.DateSub: _date_add_sql("-"),
+300            exp.DateDiff: _date_diff_sql,
+301            exp.LogicalOr: rename_func("BOOL_OR"),
+302            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+303            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+304            exp.StrPosition: str_position_sql,
+305            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+306            exp.Substring: _substring_sql,
+307            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+308            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+309            exp.TableSample: no_tablesample_sql,
+310            exp.Trim: trim_sql,
+311            exp.TryCast: no_trycast_sql,
+312            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+313            exp.DataType: _datatype_sql,
+314            exp.GroupConcat: _string_agg_sql,
+315            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+316            if isinstance(seq_get(e.expressions, 0), exp.Select)
+317            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+318        }
+
+ + + + +
+
+ + Postgres() + + +
+ + + + +
+ +
+
+ +
+ + class + Postgres.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
209    class Tokenizer(tokens.Tokenizer):
+210        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+211        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+212        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+213
+214        KEYWORDS = {
+215            **tokens.Tokenizer.KEYWORDS,
+216            "~~": TokenType.LIKE,
+217            "~~*": TokenType.ILIKE,
+218            "~*": TokenType.IRLIKE,
+219            "~": TokenType.RLIKE,
+220            "BEGIN": TokenType.COMMAND,
+221            "BEGIN TRANSACTION": TokenType.BEGIN,
+222            "BIGSERIAL": TokenType.BIGSERIAL,
+223            "CHARACTER VARYING": TokenType.VARCHAR,
+224            "COMMENT ON": TokenType.COMMAND,
+225            "DECLARE": TokenType.COMMAND,
+226            "DO": TokenType.COMMAND,
+227            "GRANT": TokenType.COMMAND,
+228            "HSTORE": TokenType.HSTORE,
+229            "JSONB": TokenType.JSONB,
+230            "REFRESH": TokenType.COMMAND,
+231            "REINDEX": TokenType.COMMAND,
+232            "RESET": TokenType.COMMAND,
+233            "REVOKE": TokenType.COMMAND,
+234            "SERIAL": TokenType.SERIAL,
+235            "SMALLSERIAL": TokenType.SMALLSERIAL,
+236            "TEMP": TokenType.TEMPORARY,
+237            "UUID": TokenType.UUID,
+238            "CSTRING": TokenType.PSEUDO_TYPE,
+239        }
+240        QUOTES = ["'", "$$"]
+241        SINGLE_TOKENS = {
+242            **tokens.Tokenizer.SINGLE_TOKENS,
+243            "$": TokenType.PARAMETER,
+244        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Postgres.Parser(sqlglot.parser.Parser): + + + +
+ +
246    class Parser(parser.Parser):
+247        STRICT_CAST = False
+248
+249        FUNCTIONS = {
+250            **parser.Parser.FUNCTIONS,  # type: ignore
+251            "NOW": exp.CurrentTimestamp.from_arg_list,
+252            "TO_TIMESTAMP": _to_timestamp,
+253            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+254            "GENERATE_SERIES": _generate_series,
+255        }
+256
+257        BITWISE = {
+258            **parser.Parser.BITWISE,  # type: ignore
+259            TokenType.HASH: exp.BitwiseXor,
+260        }
+261
+262        FACTOR = {
+263            **parser.Parser.FACTOR,  # type: ignore
+264            TokenType.CARET: exp.Pow,
+265        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Postgres.Generator(sqlglot.generator.Generator): + + + +
+ +
267    class Generator(generator.Generator):
+268        LOCKING_READS_SUPPORTED = True
+269
+270        TYPE_MAPPING = {
+271            **generator.Generator.TYPE_MAPPING,  # type: ignore
+272            exp.DataType.Type.TINYINT: "SMALLINT",
+273            exp.DataType.Type.FLOAT: "REAL",
+274            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+275            exp.DataType.Type.BINARY: "BYTEA",
+276            exp.DataType.Type.VARBINARY: "BYTEA",
+277            exp.DataType.Type.DATETIME: "TIMESTAMP",
+278        }
+279
+280        TRANSFORMS = {
+281            **generator.Generator.TRANSFORMS,  # type: ignore
+282            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+283            exp.ColumnDef: preprocess(
+284                [
+285                    _auto_increment_to_serial,
+286                    _serial_to_generated,
+287                ],
+288                delegate("columndef_sql"),
+289            ),
+290            exp.JSONExtract: arrow_json_extract_sql,
+291            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+292            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+293            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+294            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+295            exp.Pow: lambda self, e: self.binary(e, "^"),
+296            exp.CurrentDate: no_paren_current_date_sql,
+297            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+298            exp.DateAdd: _date_add_sql("+"),
+299            exp.DateSub: _date_add_sql("-"),
+300            exp.DateDiff: _date_diff_sql,
+301            exp.LogicalOr: rename_func("BOOL_OR"),
+302            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+303            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+304            exp.StrPosition: str_position_sql,
+305            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+306            exp.Substring: _substring_sql,
+307            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+308            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+309            exp.TableSample: no_tablesample_sql,
+310            exp.Trim: trim_sql,
+311            exp.TryCast: no_trycast_sql,
+312            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+313            exp.DataType: _datatype_sql,
+314            exp.GroupConcat: _string_agg_sql,
+315            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+316            if isinstance(seq_get(e.expressions, 0), exp.Select)
+317            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+318        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/presto.html b/docs/sqlglot/dialects/presto.html new file mode 100644 index 0000000..6be49f5 --- /dev/null +++ b/docs/sqlglot/dialects/presto.html @@ -0,0 +1,1255 @@ + + + + + + + sqlglot.dialects.presto API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.presto

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens, transforms
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    format_time_lambda,
+  7    if_sql,
+  8    no_ilike_sql,
+  9    no_safe_divide_sql,
+ 10    rename_func,
+ 11    struct_extract_sql,
+ 12    timestrtotime_sql,
+ 13)
+ 14from sqlglot.dialects.mysql import MySQL
+ 15from sqlglot.errors import UnsupportedError
+ 16from sqlglot.helper import seq_get
+ 17from sqlglot.tokens import TokenType
+ 18
+ 19
+ 20def _approx_distinct_sql(self, expression):
+ 21    accuracy = expression.args.get("accuracy")
+ 22    accuracy = ", " + self.sql(accuracy) if accuracy else ""
+ 23    return f"APPROX_DISTINCT({self.sql(expression, 'this')}{accuracy})"
+ 24
+ 25
+ 26def _datatype_sql(self, expression):
+ 27    sql = self.datatype_sql(expression)
+ 28    if expression.this == exp.DataType.Type.TIMESTAMPTZ:
+ 29        sql = f"{sql} WITH TIME ZONE"
+ 30    return sql
+ 31
+ 32
+ 33def _explode_to_unnest_sql(self, expression):
+ 34    if isinstance(expression.this, (exp.Explode, exp.Posexplode)):
+ 35        return self.sql(
+ 36            exp.Join(
+ 37                this=exp.Unnest(
+ 38                    expressions=[expression.this.this],
+ 39                    alias=expression.args.get("alias"),
+ 40                    ordinality=isinstance(expression.this, exp.Posexplode),
+ 41                ),
+ 42                kind="cross",
+ 43            )
+ 44        )
+ 45    return self.lateral_sql(expression)
+ 46
+ 47
+ 48def _initcap_sql(self, expression):
+ 49    regex = r"(\w)(\w*)"
+ 50    return f"REGEXP_REPLACE({self.sql(expression, 'this')}, '{regex}', x -> UPPER(x[1]) || LOWER(x[2]))"
+ 51
+ 52
+ 53def _decode_sql(self, expression):
+ 54    _ensure_utf8(expression.args.get("charset"))
+ 55    return f"FROM_UTF8({self.format_args(expression.this, expression.args.get('replace'))})"
+ 56
+ 57
+ 58def _encode_sql(self, expression):
+ 59    _ensure_utf8(expression.args.get("charset"))
+ 60    return f"TO_UTF8({self.sql(expression, 'this')})"
+ 61
+ 62
+ 63def _no_sort_array(self, expression):
+ 64    if expression.args.get("asc") == exp.false():
+ 65        comparator = "(a, b) -> CASE WHEN a < b THEN 1 WHEN a > b THEN -1 ELSE 0 END"
+ 66    else:
+ 67        comparator = None
+ 68    args = self.format_args(expression.this, comparator)
+ 69    return f"ARRAY_SORT({args})"
+ 70
+ 71
+ 72def _schema_sql(self, expression):
+ 73    if isinstance(expression.parent, exp.Property):
+ 74        columns = ", ".join(f"'{c.name}'" for c in expression.expressions)
+ 75        return f"ARRAY[{columns}]"
+ 76
+ 77    for schema in expression.parent.find_all(exp.Schema):
+ 78        if isinstance(schema.parent, exp.Property):
+ 79            expression = expression.copy()
+ 80            expression.expressions.extend(schema.expressions)
+ 81
+ 82    return self.schema_sql(expression)
+ 83
+ 84
+ 85def _quantile_sql(self, expression):
+ 86    self.unsupported("Presto does not support exact quantiles")
+ 87    return f"APPROX_PERCENTILE({self.sql(expression, 'this')}, {self.sql(expression, 'quantile')})"
+ 88
+ 89
+ 90def _str_to_time_sql(self, expression):
+ 91    return f"DATE_PARSE({self.sql(expression, 'this')}, {self.format_time(expression)})"
+ 92
+ 93
+ 94def _ts_or_ds_to_date_sql(self, expression):
+ 95    time_format = self.format_time(expression)
+ 96    if time_format and time_format not in (Presto.time_format, Presto.date_format):
+ 97        return f"CAST({_str_to_time_sql(self, expression)} AS DATE)"
+ 98    return f"CAST(SUBSTR(CAST({self.sql(expression, 'this')} AS VARCHAR), 1, 10) AS DATE)"
+ 99
+100
+101def _ts_or_ds_add_sql(self, expression):
+102    this = self.sql(expression, "this")
+103    e = self.sql(expression, "expression")
+104    unit = self.sql(expression, "unit") or "'day'"
+105    return f"DATE_ADD({unit}, {e}, DATE_PARSE(SUBSTR({this}, 1, 10), {Presto.date_format}))"
+106
+107
+108def _sequence_sql(self, expression):
+109    start = expression.args["start"]
+110    end = expression.args["end"]
+111    step = expression.args.get("step", 1)  # Postgres defaults to 1 for generate_series
+112
+113    target_type = None
+114
+115    if isinstance(start, exp.Cast):
+116        target_type = start.to
+117    elif isinstance(end, exp.Cast):
+118        target_type = end.to
+119
+120    if target_type and target_type.this == exp.DataType.Type.TIMESTAMP:
+121        to = target_type.copy()
+122
+123        if target_type is start.to:
+124            end = exp.Cast(this=end, to=to)
+125        else:
+126            start = exp.Cast(this=start, to=to)
+127
+128    return f"SEQUENCE({self.format_args(start, end, step)})"
+129
+130
+131def _ensure_utf8(charset):
+132    if charset.name.lower() != "utf-8":
+133        raise UnsupportedError(f"Unsupported charset {charset}")
+134
+135
+136def _approx_percentile(args):
+137    if len(args) == 4:
+138        return exp.ApproxQuantile(
+139            this=seq_get(args, 0),
+140            weight=seq_get(args, 1),
+141            quantile=seq_get(args, 2),
+142            accuracy=seq_get(args, 3),
+143        )
+144    if len(args) == 3:
+145        return exp.ApproxQuantile(
+146            this=seq_get(args, 0),
+147            quantile=seq_get(args, 1),
+148            accuracy=seq_get(args, 2),
+149        )
+150    return exp.ApproxQuantile.from_arg_list(args)
+151
+152
+153def _from_unixtime(args):
+154    if len(args) == 3:
+155        return exp.UnixToTime(
+156            this=seq_get(args, 0),
+157            hours=seq_get(args, 1),
+158            minutes=seq_get(args, 2),
+159        )
+160    if len(args) == 2:
+161        return exp.UnixToTime(
+162            this=seq_get(args, 0),
+163            zone=seq_get(args, 1),
+164        )
+165    return exp.UnixToTime.from_arg_list(args)
+166
+167
+168class Presto(Dialect):
+169    index_offset = 1
+170    null_ordering = "nulls_are_last"
+171    time_format = MySQL.time_format  # type: ignore
+172    time_mapping = MySQL.time_mapping  # type: ignore
+173
+174    class Tokenizer(tokens.Tokenizer):
+175        KEYWORDS = {
+176            **tokens.Tokenizer.KEYWORDS,
+177            "START": TokenType.BEGIN,
+178            "ROW": TokenType.STRUCT,
+179        }
+180
+181    class Parser(parser.Parser):
+182        FUNCTIONS = {
+183            **parser.Parser.FUNCTIONS,  # type: ignore
+184            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
+185            "CARDINALITY": exp.ArraySize.from_arg_list,
+186            "CONTAINS": exp.ArrayContains.from_arg_list,
+187            "DATE_ADD": lambda args: exp.DateAdd(
+188                this=seq_get(args, 2),
+189                expression=seq_get(args, 1),
+190                unit=seq_get(args, 0),
+191            ),
+192            "DATE_DIFF": lambda args: exp.DateDiff(
+193                this=seq_get(args, 2),
+194                expression=seq_get(args, 1),
+195                unit=seq_get(args, 0),
+196            ),
+197            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
+198            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
+199            "FROM_UNIXTIME": _from_unixtime,
+200            "NOW": exp.CurrentTimestamp.from_arg_list,
+201            "STRPOS": lambda args: exp.StrPosition(
+202                this=seq_get(args, 0),
+203                substr=seq_get(args, 1),
+204                instance=seq_get(args, 2),
+205            ),
+206            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
+207            "APPROX_PERCENTILE": _approx_percentile,
+208            "FROM_HEX": exp.Unhex.from_arg_list,
+209            "TO_HEX": exp.Hex.from_arg_list,
+210            "TO_UTF8": lambda args: exp.Encode(
+211                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
+212            ),
+213            "FROM_UTF8": lambda args: exp.Decode(
+214                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
+215            ),
+216        }
+217        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
+218        FUNCTION_PARSERS.pop("TRIM")
+219
+220    class Generator(generator.Generator):
+221        STRUCT_DELIMITER = ("(", ")")
+222
+223        PROPERTIES_LOCATION = {
+224            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+225            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
+226        }
+227
+228        TYPE_MAPPING = {
+229            **generator.Generator.TYPE_MAPPING,  # type: ignore
+230            exp.DataType.Type.INT: "INTEGER",
+231            exp.DataType.Type.FLOAT: "REAL",
+232            exp.DataType.Type.BINARY: "VARBINARY",
+233            exp.DataType.Type.TEXT: "VARCHAR",
+234            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+235            exp.DataType.Type.STRUCT: "ROW",
+236        }
+237
+238        TRANSFORMS = {
+239            **generator.Generator.TRANSFORMS,  # type: ignore
+240            **transforms.UNALIAS_GROUP,  # type: ignore
+241            exp.ApproxDistinct: _approx_distinct_sql,
+242            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
+243            exp.ArrayConcat: rename_func("CONCAT"),
+244            exp.ArrayContains: rename_func("CONTAINS"),
+245            exp.ArraySize: rename_func("CARDINALITY"),
+246            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+247            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+248            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
+249            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+250            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+251            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+252            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+253            exp.DataType: _datatype_sql,
+254            exp.DateAdd: lambda self, e: f"""DATE_ADD({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+255            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+256            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)",
+257            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)",
+258            exp.Decode: _decode_sql,
+259            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
+260            exp.Encode: _encode_sql,
+261            exp.GenerateSeries: _sequence_sql,
+262            exp.Hex: rename_func("TO_HEX"),
+263            exp.If: if_sql,
+264            exp.ILike: no_ilike_sql,
+265            exp.Initcap: _initcap_sql,
+266            exp.Lateral: _explode_to_unnest_sql,
+267            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+268            exp.LogicalOr: rename_func("BOOL_OR"),
+269            exp.Quantile: _quantile_sql,
+270            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
+271            exp.SafeDivide: no_safe_divide_sql,
+272            exp.Schema: _schema_sql,
+273            exp.SortArray: _no_sort_array,
+274            exp.StrPosition: rename_func("STRPOS"),
+275            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+276            exp.StrToTime: _str_to_time_sql,
+277            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
+278            exp.StructExtract: struct_extract_sql,
+279            exp.TableFormatProperty: lambda self, e: f"TABLE_FORMAT='{e.name.upper()}'",
+280            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
+281            exp.TimeStrToDate: timestrtotime_sql,
+282            exp.TimeStrToTime: timestrtotime_sql,
+283            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
+284            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
+285            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
+286            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+287            exp.TsOrDsAdd: _ts_or_ds_add_sql,
+288            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+289            exp.Unhex: rename_func("FROM_HEX"),
+290            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
+291            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+292            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
+293            exp.VariancePop: rename_func("VAR_POP"),
+294        }
+295
+296        def transaction_sql(self, expression):
+297            modes = expression.args.get("modes")
+298            modes = f" {', '.join(modes)}" if modes else ""
+299            return f"START TRANSACTION{modes}"
+
+ + +
+
+ +
+ + class + Presto(sqlglot.dialects.dialect.Dialect): + + + +
+ +
169class Presto(Dialect):
+170    index_offset = 1
+171    null_ordering = "nulls_are_last"
+172    time_format = MySQL.time_format  # type: ignore
+173    time_mapping = MySQL.time_mapping  # type: ignore
+174
+175    class Tokenizer(tokens.Tokenizer):
+176        KEYWORDS = {
+177            **tokens.Tokenizer.KEYWORDS,
+178            "START": TokenType.BEGIN,
+179            "ROW": TokenType.STRUCT,
+180        }
+181
+182    class Parser(parser.Parser):
+183        FUNCTIONS = {
+184            **parser.Parser.FUNCTIONS,  # type: ignore
+185            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
+186            "CARDINALITY": exp.ArraySize.from_arg_list,
+187            "CONTAINS": exp.ArrayContains.from_arg_list,
+188            "DATE_ADD": lambda args: exp.DateAdd(
+189                this=seq_get(args, 2),
+190                expression=seq_get(args, 1),
+191                unit=seq_get(args, 0),
+192            ),
+193            "DATE_DIFF": lambda args: exp.DateDiff(
+194                this=seq_get(args, 2),
+195                expression=seq_get(args, 1),
+196                unit=seq_get(args, 0),
+197            ),
+198            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
+199            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
+200            "FROM_UNIXTIME": _from_unixtime,
+201            "NOW": exp.CurrentTimestamp.from_arg_list,
+202            "STRPOS": lambda args: exp.StrPosition(
+203                this=seq_get(args, 0),
+204                substr=seq_get(args, 1),
+205                instance=seq_get(args, 2),
+206            ),
+207            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
+208            "APPROX_PERCENTILE": _approx_percentile,
+209            "FROM_HEX": exp.Unhex.from_arg_list,
+210            "TO_HEX": exp.Hex.from_arg_list,
+211            "TO_UTF8": lambda args: exp.Encode(
+212                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
+213            ),
+214            "FROM_UTF8": lambda args: exp.Decode(
+215                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
+216            ),
+217        }
+218        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
+219        FUNCTION_PARSERS.pop("TRIM")
+220
+221    class Generator(generator.Generator):
+222        STRUCT_DELIMITER = ("(", ")")
+223
+224        PROPERTIES_LOCATION = {
+225            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+226            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
+227        }
+228
+229        TYPE_MAPPING = {
+230            **generator.Generator.TYPE_MAPPING,  # type: ignore
+231            exp.DataType.Type.INT: "INTEGER",
+232            exp.DataType.Type.FLOAT: "REAL",
+233            exp.DataType.Type.BINARY: "VARBINARY",
+234            exp.DataType.Type.TEXT: "VARCHAR",
+235            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+236            exp.DataType.Type.STRUCT: "ROW",
+237        }
+238
+239        TRANSFORMS = {
+240            **generator.Generator.TRANSFORMS,  # type: ignore
+241            **transforms.UNALIAS_GROUP,  # type: ignore
+242            exp.ApproxDistinct: _approx_distinct_sql,
+243            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
+244            exp.ArrayConcat: rename_func("CONCAT"),
+245            exp.ArrayContains: rename_func("CONTAINS"),
+246            exp.ArraySize: rename_func("CARDINALITY"),
+247            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+248            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+249            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
+250            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+251            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+252            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+253            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+254            exp.DataType: _datatype_sql,
+255            exp.DateAdd: lambda self, e: f"""DATE_ADD({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+256            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+257            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)",
+258            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)",
+259            exp.Decode: _decode_sql,
+260            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
+261            exp.Encode: _encode_sql,
+262            exp.GenerateSeries: _sequence_sql,
+263            exp.Hex: rename_func("TO_HEX"),
+264            exp.If: if_sql,
+265            exp.ILike: no_ilike_sql,
+266            exp.Initcap: _initcap_sql,
+267            exp.Lateral: _explode_to_unnest_sql,
+268            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+269            exp.LogicalOr: rename_func("BOOL_OR"),
+270            exp.Quantile: _quantile_sql,
+271            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
+272            exp.SafeDivide: no_safe_divide_sql,
+273            exp.Schema: _schema_sql,
+274            exp.SortArray: _no_sort_array,
+275            exp.StrPosition: rename_func("STRPOS"),
+276            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+277            exp.StrToTime: _str_to_time_sql,
+278            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
+279            exp.StructExtract: struct_extract_sql,
+280            exp.TableFormatProperty: lambda self, e: f"TABLE_FORMAT='{e.name.upper()}'",
+281            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
+282            exp.TimeStrToDate: timestrtotime_sql,
+283            exp.TimeStrToTime: timestrtotime_sql,
+284            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
+285            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
+286            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
+287            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+288            exp.TsOrDsAdd: _ts_or_ds_add_sql,
+289            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+290            exp.Unhex: rename_func("FROM_HEX"),
+291            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
+292            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+293            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
+294            exp.VariancePop: rename_func("VAR_POP"),
+295        }
+296
+297        def transaction_sql(self, expression):
+298            modes = expression.args.get("modes")
+299            modes = f" {', '.join(modes)}" if modes else ""
+300            return f"START TRANSACTION{modes}"
+
+ + + + +
+
+ + Presto() + + +
+ + + + +
+ +
+
+ +
+ + class + Presto.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
175    class Tokenizer(tokens.Tokenizer):
+176        KEYWORDS = {
+177            **tokens.Tokenizer.KEYWORDS,
+178            "START": TokenType.BEGIN,
+179            "ROW": TokenType.STRUCT,
+180        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Presto.Parser(sqlglot.parser.Parser): + + + +
+ +
182    class Parser(parser.Parser):
+183        FUNCTIONS = {
+184            **parser.Parser.FUNCTIONS,  # type: ignore
+185            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
+186            "CARDINALITY": exp.ArraySize.from_arg_list,
+187            "CONTAINS": exp.ArrayContains.from_arg_list,
+188            "DATE_ADD": lambda args: exp.DateAdd(
+189                this=seq_get(args, 2),
+190                expression=seq_get(args, 1),
+191                unit=seq_get(args, 0),
+192            ),
+193            "DATE_DIFF": lambda args: exp.DateDiff(
+194                this=seq_get(args, 2),
+195                expression=seq_get(args, 1),
+196                unit=seq_get(args, 0),
+197            ),
+198            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
+199            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
+200            "FROM_UNIXTIME": _from_unixtime,
+201            "NOW": exp.CurrentTimestamp.from_arg_list,
+202            "STRPOS": lambda args: exp.StrPosition(
+203                this=seq_get(args, 0),
+204                substr=seq_get(args, 1),
+205                instance=seq_get(args, 2),
+206            ),
+207            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
+208            "APPROX_PERCENTILE": _approx_percentile,
+209            "FROM_HEX": exp.Unhex.from_arg_list,
+210            "TO_HEX": exp.Hex.from_arg_list,
+211            "TO_UTF8": lambda args: exp.Encode(
+212                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
+213            ),
+214            "FROM_UTF8": lambda args: exp.Decode(
+215                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
+216            ),
+217        }
+218        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
+219        FUNCTION_PARSERS.pop("TRIM")
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Presto.Generator(sqlglot.generator.Generator): + + + +
+ +
221    class Generator(generator.Generator):
+222        STRUCT_DELIMITER = ("(", ")")
+223
+224        PROPERTIES_LOCATION = {
+225            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+226            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
+227        }
+228
+229        TYPE_MAPPING = {
+230            **generator.Generator.TYPE_MAPPING,  # type: ignore
+231            exp.DataType.Type.INT: "INTEGER",
+232            exp.DataType.Type.FLOAT: "REAL",
+233            exp.DataType.Type.BINARY: "VARBINARY",
+234            exp.DataType.Type.TEXT: "VARCHAR",
+235            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+236            exp.DataType.Type.STRUCT: "ROW",
+237        }
+238
+239        TRANSFORMS = {
+240            **generator.Generator.TRANSFORMS,  # type: ignore
+241            **transforms.UNALIAS_GROUP,  # type: ignore
+242            exp.ApproxDistinct: _approx_distinct_sql,
+243            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
+244            exp.ArrayConcat: rename_func("CONCAT"),
+245            exp.ArrayContains: rename_func("CONTAINS"),
+246            exp.ArraySize: rename_func("CARDINALITY"),
+247            exp.BitwiseAnd: lambda self, e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+248            exp.BitwiseLeftShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+249            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
+250            exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+251            exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+252            exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+253            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+254            exp.DataType: _datatype_sql,
+255            exp.DateAdd: lambda self, e: f"""DATE_ADD({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+256            exp.DateDiff: lambda self, e: f"""DATE_DIFF({self.sql(e, 'unit') or "'day'"}, {self.sql(e, 'expression')}, {self.sql(e, 'this')})""",
+257            exp.DateStrToDate: lambda self, e: f"CAST(DATE_PARSE({self.sql(e, 'this')}, {Presto.date_format}) AS DATE)",
+258            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.dateint_format}) AS INT)",
+259            exp.Decode: _decode_sql,
+260            exp.DiToDate: lambda self, e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.dateint_format}) AS DATE)",
+261            exp.Encode: _encode_sql,
+262            exp.GenerateSeries: _sequence_sql,
+263            exp.Hex: rename_func("TO_HEX"),
+264            exp.If: if_sql,
+265            exp.ILike: no_ilike_sql,
+266            exp.Initcap: _initcap_sql,
+267            exp.Lateral: _explode_to_unnest_sql,
+268            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
+269            exp.LogicalOr: rename_func("BOOL_OR"),
+270            exp.Quantile: _quantile_sql,
+271            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
+272            exp.SafeDivide: no_safe_divide_sql,
+273            exp.Schema: _schema_sql,
+274            exp.SortArray: _no_sort_array,
+275            exp.StrPosition: rename_func("STRPOS"),
+276            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
+277            exp.StrToTime: _str_to_time_sql,
+278            exp.StrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
+279            exp.StructExtract: struct_extract_sql,
+280            exp.TableFormatProperty: lambda self, e: f"TABLE_FORMAT='{e.name.upper()}'",
+281            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
+282            exp.TimeStrToDate: timestrtotime_sql,
+283            exp.TimeStrToTime: timestrtotime_sql,
+284            exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
+285            exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
+286            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
+287            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
+288            exp.TsOrDsAdd: _ts_or_ds_add_sql,
+289            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
+290            exp.Unhex: rename_func("FROM_HEX"),
+291            exp.UnixToStr: lambda self, e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
+292            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+293            exp.UnixToTimeStr: lambda self, e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
+294            exp.VariancePop: rename_func("VAR_POP"),
+295        }
+296
+297        def transaction_sql(self, expression):
+298            modes = expression.args.get("modes")
+299            modes = f" {', '.join(modes)}" if modes else ""
+300            return f"START TRANSACTION{modes}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + transaction_sql(self, expression): + + + +
+ +
297        def transaction_sql(self, expression):
+298            modes = expression.args.get("modes")
+299            modes = f" {', '.join(modes)}" if modes else ""
+300            return f"START TRANSACTION{modes}"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/redshift.html b/docs/sqlglot/dialects/redshift.html new file mode 100644 index 0000000..649bb3c --- /dev/null +++ b/docs/sqlglot/dialects/redshift.html @@ -0,0 +1,1175 @@ + + + + + + + sqlglot.dialects.redshift API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.redshift

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4
+  5from sqlglot import exp, transforms
+  6from sqlglot.dialects.dialect import rename_func
+  7from sqlglot.dialects.postgres import Postgres
+  8from sqlglot.tokens import TokenType
+  9
+ 10
+ 11class Redshift(Postgres):
+ 12    time_format = "'YYYY-MM-DD HH:MI:SS'"
+ 13    time_mapping = {
+ 14        **Postgres.time_mapping,  # type: ignore
+ 15        "MON": "%b",
+ 16        "HH": "%H",
+ 17    }
+ 18
+ 19    class Parser(Postgres.Parser):
+ 20        FUNCTIONS = {
+ 21            **Postgres.Parser.FUNCTIONS,  # type: ignore
+ 22            "DECODE": exp.Matches.from_arg_list,
+ 23            "NVL": exp.Coalesce.from_arg_list,
+ 24        }
+ 25
+ 26        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+ 27            this = super()._parse_types(check_func=check_func)
+ 28
+ 29            if (
+ 30                isinstance(this, exp.DataType)
+ 31                and this.this == exp.DataType.Type.VARCHAR
+ 32                and this.expressions
+ 33                and this.expressions[0] == exp.column("MAX")
+ 34            ):
+ 35                this.set("expressions", [exp.Var(this="MAX")])
+ 36
+ 37            return this
+ 38
+ 39    class Tokenizer(Postgres.Tokenizer):
+ 40        STRING_ESCAPES = ["\\"]
+ 41
+ 42        KEYWORDS = {
+ 43            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+ 44            "ENCODE": TokenType.ENCODE,
+ 45            "GEOMETRY": TokenType.GEOMETRY,
+ 46            "GEOGRAPHY": TokenType.GEOGRAPHY,
+ 47            "HLLSKETCH": TokenType.HLLSKETCH,
+ 48            "SUPER": TokenType.SUPER,
+ 49            "TIME": TokenType.TIMESTAMP,
+ 50            "TIMETZ": TokenType.TIMESTAMPTZ,
+ 51            "UNLOAD": TokenType.COMMAND,
+ 52            "VARBYTE": TokenType.VARBINARY,
+ 53        }
+ 54
+ 55    class Generator(Postgres.Generator):
+ 56        TYPE_MAPPING = {
+ 57            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 58            exp.DataType.Type.BINARY: "VARBYTE",
+ 59            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 60            exp.DataType.Type.INT: "INTEGER",
+ 61        }
+ 62
+ 63        PROPERTIES_LOCATION = {
+ 64            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 65            exp.LikeProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 66        }
+ 67
+ 68        TRANSFORMS = {
+ 69            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 70            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 71            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 72            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 73            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 74            exp.Matches: rename_func("DECODE"),
+ 75        }
+ 76
+ 77        def values_sql(self, expression: exp.Values) -> str:
+ 78            """
+ 79            Converts `VALUES...` expression into a series of unions.
+ 80
+ 81            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 82            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 83            very slow.
+ 84            """
+ 85            if not isinstance(expression.unnest().parent, exp.From):
+ 86                return super().values_sql(expression)
+ 87            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+ 88            selects = []
+ 89            for i, row in enumerate(rows):
+ 90                if i == 0 and expression.alias:
+ 91                    row = [
+ 92                        exp.alias_(value, column_name)
+ 93                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+ 94                    ]
+ 95                selects.append(exp.Select(expressions=row))
+ 96            subquery_expression = selects[0]
+ 97            if len(selects) > 1:
+ 98                for select in selects[1:]:
+ 99                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+100            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+101
+102        def with_properties(self, properties: exp.Properties) -> str:
+103            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+104            return self.properties(properties, prefix=" ", suffix="")
+105
+106        def renametable_sql(self, expression: exp.RenameTable) -> str:
+107            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+108            expression = expression.copy()
+109            target_table = expression.this
+110            for arg in target_table.args:
+111                if arg != "this":
+112                    target_table.set(arg, None)
+113            this = self.sql(expression, "this")
+114            return f"RENAME TO {this}"
+115
+116        def datatype_sql(self, expression: exp.DataType) -> str:
+117            """
+118            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+119            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+120            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+121            `TEXT` to `VARCHAR`.
+122            """
+123            if expression.this == exp.DataType.Type.TEXT:
+124                expression = expression.copy()
+125                expression.set("this", exp.DataType.Type.VARCHAR)
+126                precision = expression.args.get("expressions")
+127                if not precision:
+128                    expression.append("expressions", exp.Var(this="MAX"))
+129            return super().datatype_sql(expression)
+
+ + +
+
+ +
+ + class + Redshift(sqlglot.dialects.postgres.Postgres): + + + +
+ +
 12class Redshift(Postgres):
+ 13    time_format = "'YYYY-MM-DD HH:MI:SS'"
+ 14    time_mapping = {
+ 15        **Postgres.time_mapping,  # type: ignore
+ 16        "MON": "%b",
+ 17        "HH": "%H",
+ 18    }
+ 19
+ 20    class Parser(Postgres.Parser):
+ 21        FUNCTIONS = {
+ 22            **Postgres.Parser.FUNCTIONS,  # type: ignore
+ 23            "DECODE": exp.Matches.from_arg_list,
+ 24            "NVL": exp.Coalesce.from_arg_list,
+ 25        }
+ 26
+ 27        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+ 28            this = super()._parse_types(check_func=check_func)
+ 29
+ 30            if (
+ 31                isinstance(this, exp.DataType)
+ 32                and this.this == exp.DataType.Type.VARCHAR
+ 33                and this.expressions
+ 34                and this.expressions[0] == exp.column("MAX")
+ 35            ):
+ 36                this.set("expressions", [exp.Var(this="MAX")])
+ 37
+ 38            return this
+ 39
+ 40    class Tokenizer(Postgres.Tokenizer):
+ 41        STRING_ESCAPES = ["\\"]
+ 42
+ 43        KEYWORDS = {
+ 44            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+ 45            "ENCODE": TokenType.ENCODE,
+ 46            "GEOMETRY": TokenType.GEOMETRY,
+ 47            "GEOGRAPHY": TokenType.GEOGRAPHY,
+ 48            "HLLSKETCH": TokenType.HLLSKETCH,
+ 49            "SUPER": TokenType.SUPER,
+ 50            "TIME": TokenType.TIMESTAMP,
+ 51            "TIMETZ": TokenType.TIMESTAMPTZ,
+ 52            "UNLOAD": TokenType.COMMAND,
+ 53            "VARBYTE": TokenType.VARBINARY,
+ 54        }
+ 55
+ 56    class Generator(Postgres.Generator):
+ 57        TYPE_MAPPING = {
+ 58            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 59            exp.DataType.Type.BINARY: "VARBYTE",
+ 60            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 61            exp.DataType.Type.INT: "INTEGER",
+ 62        }
+ 63
+ 64        PROPERTIES_LOCATION = {
+ 65            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 66            exp.LikeProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 67        }
+ 68
+ 69        TRANSFORMS = {
+ 70            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 71            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 72            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 73            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 74            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 75            exp.Matches: rename_func("DECODE"),
+ 76        }
+ 77
+ 78        def values_sql(self, expression: exp.Values) -> str:
+ 79            """
+ 80            Converts `VALUES...` expression into a series of unions.
+ 81
+ 82            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 83            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 84            very slow.
+ 85            """
+ 86            if not isinstance(expression.unnest().parent, exp.From):
+ 87                return super().values_sql(expression)
+ 88            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+ 89            selects = []
+ 90            for i, row in enumerate(rows):
+ 91                if i == 0 and expression.alias:
+ 92                    row = [
+ 93                        exp.alias_(value, column_name)
+ 94                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+ 95                    ]
+ 96                selects.append(exp.Select(expressions=row))
+ 97            subquery_expression = selects[0]
+ 98            if len(selects) > 1:
+ 99                for select in selects[1:]:
+100                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+101            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+102
+103        def with_properties(self, properties: exp.Properties) -> str:
+104            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+105            return self.properties(properties, prefix=" ", suffix="")
+106
+107        def renametable_sql(self, expression: exp.RenameTable) -> str:
+108            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+109            expression = expression.copy()
+110            target_table = expression.this
+111            for arg in target_table.args:
+112                if arg != "this":
+113                    target_table.set(arg, None)
+114            this = self.sql(expression, "this")
+115            return f"RENAME TO {this}"
+116
+117        def datatype_sql(self, expression: exp.DataType) -> str:
+118            """
+119            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+120            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+121            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+122            `TEXT` to `VARCHAR`.
+123            """
+124            if expression.this == exp.DataType.Type.TEXT:
+125                expression = expression.copy()
+126                expression.set("this", exp.DataType.Type.VARCHAR)
+127                precision = expression.args.get("expressions")
+128                if not precision:
+129                    expression.append("expressions", exp.Var(this="MAX"))
+130            return super().datatype_sql(expression)
+
+ + + + +
+
+ + Redshift() + + +
+ + + + +
+ +
+
+ +
+ + class + Redshift.Parser(sqlglot.dialects.postgres.Postgres.Parser): + + + +
+ +
20    class Parser(Postgres.Parser):
+21        FUNCTIONS = {
+22            **Postgres.Parser.FUNCTIONS,  # type: ignore
+23            "DECODE": exp.Matches.from_arg_list,
+24            "NVL": exp.Coalesce.from_arg_list,
+25        }
+26
+27        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+28            this = super()._parse_types(check_func=check_func)
+29
+30            if (
+31                isinstance(this, exp.DataType)
+32                and this.this == exp.DataType.Type.VARCHAR
+33                and this.expressions
+34                and this.expressions[0] == exp.column("MAX")
+35            ):
+36                this.set("expressions", [exp.Var(this="MAX")])
+37
+38            return this
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Redshift.Tokenizer(sqlglot.dialects.postgres.Postgres.Tokenizer): + + + +
+ +
40    class Tokenizer(Postgres.Tokenizer):
+41        STRING_ESCAPES = ["\\"]
+42
+43        KEYWORDS = {
+44            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+45            "ENCODE": TokenType.ENCODE,
+46            "GEOMETRY": TokenType.GEOMETRY,
+47            "GEOGRAPHY": TokenType.GEOGRAPHY,
+48            "HLLSKETCH": TokenType.HLLSKETCH,
+49            "SUPER": TokenType.SUPER,
+50            "TIME": TokenType.TIMESTAMP,
+51            "TIMETZ": TokenType.TIMESTAMPTZ,
+52            "UNLOAD": TokenType.COMMAND,
+53            "VARBYTE": TokenType.VARBINARY,
+54        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Redshift.Generator(sqlglot.dialects.postgres.Postgres.Generator): + + + +
+ +
 56    class Generator(Postgres.Generator):
+ 57        TYPE_MAPPING = {
+ 58            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 59            exp.DataType.Type.BINARY: "VARBYTE",
+ 60            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 61            exp.DataType.Type.INT: "INTEGER",
+ 62        }
+ 63
+ 64        PROPERTIES_LOCATION = {
+ 65            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 66            exp.LikeProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 67        }
+ 68
+ 69        TRANSFORMS = {
+ 70            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 71            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 72            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 73            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 74            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 75            exp.Matches: rename_func("DECODE"),
+ 76        }
+ 77
+ 78        def values_sql(self, expression: exp.Values) -> str:
+ 79            """
+ 80            Converts `VALUES...` expression into a series of unions.
+ 81
+ 82            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 83            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 84            very slow.
+ 85            """
+ 86            if not isinstance(expression.unnest().parent, exp.From):
+ 87                return super().values_sql(expression)
+ 88            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+ 89            selects = []
+ 90            for i, row in enumerate(rows):
+ 91                if i == 0 and expression.alias:
+ 92                    row = [
+ 93                        exp.alias_(value, column_name)
+ 94                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+ 95                    ]
+ 96                selects.append(exp.Select(expressions=row))
+ 97            subquery_expression = selects[0]
+ 98            if len(selects) > 1:
+ 99                for select in selects[1:]:
+100                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+101            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+102
+103        def with_properties(self, properties: exp.Properties) -> str:
+104            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+105            return self.properties(properties, prefix=" ", suffix="")
+106
+107        def renametable_sql(self, expression: exp.RenameTable) -> str:
+108            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+109            expression = expression.copy()
+110            target_table = expression.this
+111            for arg in target_table.args:
+112                if arg != "this":
+113                    target_table.set(arg, None)
+114            this = self.sql(expression, "this")
+115            return f"RENAME TO {this}"
+116
+117        def datatype_sql(self, expression: exp.DataType) -> str:
+118            """
+119            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+120            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+121            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+122            `TEXT` to `VARCHAR`.
+123            """
+124            if expression.this == exp.DataType.Type.TEXT:
+125                expression = expression.copy()
+126                expression.set("this", exp.DataType.Type.VARCHAR)
+127                precision = expression.args.get("expressions")
+128                if not precision:
+129                    expression.append("expressions", exp.Var(this="MAX"))
+130            return super().datatype_sql(expression)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + values_sql(self, expression: sqlglot.expressions.Values) -> str: + + + +
+ +
 78        def values_sql(self, expression: exp.Values) -> str:
+ 79            """
+ 80            Converts `VALUES...` expression into a series of unions.
+ 81
+ 82            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 83            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 84            very slow.
+ 85            """
+ 86            if not isinstance(expression.unnest().parent, exp.From):
+ 87                return super().values_sql(expression)
+ 88            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+ 89            selects = []
+ 90            for i, row in enumerate(rows):
+ 91                if i == 0 and expression.alias:
+ 92                    row = [
+ 93                        exp.alias_(value, column_name)
+ 94                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+ 95                    ]
+ 96                selects.append(exp.Select(expressions=row))
+ 97            subquery_expression = selects[0]
+ 98            if len(selects) > 1:
+ 99                for select in selects[1:]:
+100                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+101            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+
+ + +

Converts VALUES... expression into a series of unions.

+ +

Note: If you have a lot of unions then this will result in a large number of recursive statements to +evaluate the expression. You may need to increase sys.setrecursionlimit to run and it can also be +very slow.

+
+ + +
+
+ +
+ + def + with_properties(self, properties: sqlglot.expressions.Properties) -> str: + + + +
+ +
103        def with_properties(self, properties: exp.Properties) -> str:
+104            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+105            return self.properties(properties, prefix=" ", suffix="")
+
+ + +

Redshift doesn't have WITH as part of their with_properties so we remove it

+
+ + +
+
+ +
+ + def + renametable_sql(self, expression: sqlglot.expressions.RenameTable) -> str: + + + +
+ +
107        def renametable_sql(self, expression: exp.RenameTable) -> str:
+108            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+109            expression = expression.copy()
+110            target_table = expression.this
+111            for arg in target_table.args:
+112                if arg != "this":
+113                    target_table.set(arg, None)
+114            this = self.sql(expression, "this")
+115            return f"RENAME TO {this}"
+
+ + +

Redshift only supports defining the table name itself (not the db) when renaming tables

+
+ + +
+
+ +
+ + def + datatype_sql(self, expression: sqlglot.expressions.DataType) -> str: + + + +
+ +
117        def datatype_sql(self, expression: exp.DataType) -> str:
+118            """
+119            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+120            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+121            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+122            `TEXT` to `VARCHAR`.
+123            """
+124            if expression.this == exp.DataType.Type.TEXT:
+125                expression = expression.copy()
+126                expression.set("this", exp.DataType.Type.VARCHAR)
+127                precision = expression.args.get("expressions")
+128                if not precision:
+129                    expression.append("expressions", exp.Var(this="MAX"))
+130            return super().datatype_sql(expression)
+
+ + +

Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean +VARCHAR of max length which is VARCHAR(max) in Redshift. Therefore if we get a TEXT data type +without precision we convert it to VARCHAR(max) and if it does have precision then we just convert +TEXT to VARCHAR.

+
+ + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/snowflake.html b/docs/sqlglot/dialects/snowflake.html new file mode 100644 index 0000000..c39b31e --- /dev/null +++ b/docs/sqlglot/dialects/snowflake.html @@ -0,0 +1,1528 @@ + + + + + + + sqlglot.dialects.snowflake API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.snowflake

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    datestrtodate_sql,
+  7    format_time_lambda,
+  8    inline_array_sql,
+  9    rename_func,
+ 10    timestrtotime_sql,
+ 11    var_map_sql,
+ 12)
+ 13from sqlglot.expressions import Literal
+ 14from sqlglot.helper import flatten, seq_get
+ 15from sqlglot.tokens import TokenType
+ 16
+ 17
+ 18def _check_int(s):
+ 19    if s[0] in ("-", "+"):
+ 20        return s[1:].isdigit()
+ 21    return s.isdigit()
+ 22
+ 23
+ 24# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
+ 25def _snowflake_to_timestamp(args):
+ 26    if len(args) == 2:
+ 27        first_arg, second_arg = args
+ 28        if second_arg.is_string:
+ 29            # case: <string_expr> [ , <format> ]
+ 30            return format_time_lambda(exp.StrToTime, "snowflake")(args)
+ 31
+ 32        # case: <numeric_expr> [ , <scale> ]
+ 33        if second_arg.name not in ["0", "3", "9"]:
+ 34            raise ValueError(
+ 35                f"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9"
+ 36            )
+ 37
+ 38        if second_arg.name == "0":
+ 39            timescale = exp.UnixToTime.SECONDS
+ 40        elif second_arg.name == "3":
+ 41            timescale = exp.UnixToTime.MILLIS
+ 42        elif second_arg.name == "9":
+ 43            timescale = exp.UnixToTime.MICROS
+ 44
+ 45        return exp.UnixToTime(this=first_arg, scale=timescale)
+ 46
+ 47    first_arg = seq_get(args, 0)
+ 48    if not isinstance(first_arg, Literal):
+ 49        # case: <variant_expr>
+ 50        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
+ 51
+ 52    if first_arg.is_string:
+ 53        if _check_int(first_arg.this):
+ 54            # case: <integer>
+ 55            return exp.UnixToTime.from_arg_list(args)
+ 56
+ 57        # case: <date_expr>
+ 58        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
+ 59
+ 60    # case: <numeric_expr>
+ 61    return exp.UnixToTime.from_arg_list(args)
+ 62
+ 63
+ 64def _unix_to_time_sql(self, expression):
+ 65    scale = expression.args.get("scale")
+ 66    timestamp = self.sql(expression, "this")
+ 67    if scale in [None, exp.UnixToTime.SECONDS]:
+ 68        return f"TO_TIMESTAMP({timestamp})"
+ 69    if scale == exp.UnixToTime.MILLIS:
+ 70        return f"TO_TIMESTAMP({timestamp}, 3)"
+ 71    if scale == exp.UnixToTime.MICROS:
+ 72        return f"TO_TIMESTAMP({timestamp}, 9)"
+ 73
+ 74    raise ValueError("Improper scale for timestamp")
+ 75
+ 76
+ 77# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
+ 78# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
+ 79def _parse_date_part(self):
+ 80    this = self._parse_var() or self._parse_type()
+ 81    self._match(TokenType.COMMA)
+ 82    expression = self._parse_bitwise()
+ 83
+ 84    name = this.name.upper()
+ 85    if name.startswith("EPOCH"):
+ 86        if name.startswith("EPOCH_MILLISECOND"):
+ 87            scale = 10**3
+ 88        elif name.startswith("EPOCH_MICROSECOND"):
+ 89            scale = 10**6
+ 90        elif name.startswith("EPOCH_NANOSECOND"):
+ 91            scale = 10**9
+ 92        else:
+ 93            scale = None
+ 94
+ 95        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
+ 96        to_unix = self.expression(exp.TimeToUnix, this=ts)
+ 97
+ 98        if scale:
+ 99            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
+100
+101        return to_unix
+102
+103    return self.expression(exp.Extract, this=this, expression=expression)
+104
+105
+106def _datatype_sql(self, expression):
+107    if expression.this == exp.DataType.Type.ARRAY:
+108        return "ARRAY"
+109    elif expression.this == exp.DataType.Type.MAP:
+110        return "OBJECT"
+111    return self.datatype_sql(expression)
+112
+113
+114class Snowflake(Dialect):
+115    null_ordering = "nulls_are_large"
+116    time_format = "'yyyy-mm-dd hh24:mi:ss'"
+117
+118    time_mapping = {
+119        "YYYY": "%Y",
+120        "yyyy": "%Y",
+121        "YY": "%y",
+122        "yy": "%y",
+123        "MMMM": "%B",
+124        "mmmm": "%B",
+125        "MON": "%b",
+126        "mon": "%b",
+127        "MM": "%m",
+128        "mm": "%m",
+129        "DD": "%d",
+130        "dd": "%d",
+131        "d": "%-d",
+132        "DY": "%w",
+133        "dy": "%w",
+134        "HH24": "%H",
+135        "hh24": "%H",
+136        "HH12": "%I",
+137        "hh12": "%I",
+138        "MI": "%M",
+139        "mi": "%M",
+140        "SS": "%S",
+141        "ss": "%S",
+142        "FF": "%f",
+143        "ff": "%f",
+144        "FF6": "%f",
+145        "ff6": "%f",
+146    }
+147
+148    class Parser(parser.Parser):
+149        FUNCTIONS = {
+150            **parser.Parser.FUNCTIONS,
+151            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
+152            "IFF": exp.If.from_arg_list,
+153            "TO_TIMESTAMP": _snowflake_to_timestamp,
+154            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
+155            "RLIKE": exp.RegexpLike.from_arg_list,
+156            "DECODE": exp.Matches.from_arg_list,
+157            "OBJECT_CONSTRUCT": parser.parse_var_map,
+158        }
+159
+160        FUNCTION_PARSERS = {
+161            **parser.Parser.FUNCTION_PARSERS,
+162            "DATE_PART": _parse_date_part,
+163        }
+164        FUNCTION_PARSERS.pop("TRIM")
+165
+166        FUNC_TOKENS = {
+167            *parser.Parser.FUNC_TOKENS,
+168            TokenType.RLIKE,
+169            TokenType.TABLE,
+170        }
+171
+172        COLUMN_OPERATORS = {
+173            **parser.Parser.COLUMN_OPERATORS,  # type: ignore
+174            TokenType.COLON: lambda self, this, path: self.expression(
+175                exp.Bracket,
+176                this=this,
+177                expressions=[path],
+178            ),
+179        }
+180
+181    class Tokenizer(tokens.Tokenizer):
+182        QUOTES = ["'", "$$"]
+183        STRING_ESCAPES = ["\\", "'"]
+184
+185        SINGLE_TOKENS = {
+186            **tokens.Tokenizer.SINGLE_TOKENS,
+187            "$": TokenType.PARAMETER,
+188        }
+189
+190        KEYWORDS = {
+191            **tokens.Tokenizer.KEYWORDS,
+192            "EXCLUDE": TokenType.EXCEPT,
+193            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+194            "PUT": TokenType.COMMAND,
+195            "RENAME": TokenType.REPLACE,
+196            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
+197            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
+198            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
+199            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
+200            "MINUS": TokenType.EXCEPT,
+201            "SAMPLE": TokenType.TABLE_SAMPLE,
+202        }
+203
+204    class Generator(generator.Generator):
+205        CREATE_TRANSIENT = True
+206
+207        TRANSFORMS = {
+208            **generator.Generator.TRANSFORMS,  # type: ignore
+209            exp.Array: inline_array_sql,
+210            exp.ArrayConcat: rename_func("ARRAY_CAT"),
+211            exp.DateAdd: rename_func("DATEADD"),
+212            exp.DateStrToDate: datestrtodate_sql,
+213            exp.DataType: _datatype_sql,
+214            exp.If: rename_func("IFF"),
+215            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+216            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+217            exp.Parameter: lambda self, e: f"${self.sql(e, 'this')}",
+218            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+219            exp.Matches: rename_func("DECODE"),
+220            exp.StrPosition: lambda self, e: f"{self.normalize_func('POSITION')}({self.format_args(e.args.get('substr'), e.this, e.args.get('position'))})",
+221            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+222            exp.TimeStrToTime: timestrtotime_sql,
+223            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
+224            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+225            exp.UnixToTime: _unix_to_time_sql,
+226            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+227        }
+228
+229        TYPE_MAPPING = {
+230            **generator.Generator.TYPE_MAPPING,  # type: ignore
+231            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
+232        }
+233
+234        STAR_MAPPING = {
+235            "except": "EXCLUDE",
+236            "replace": "RENAME",
+237        }
+238
+239        def except_op(self, expression):
+240            if not expression.args.get("distinct", False):
+241                self.unsupported("EXCEPT with All is not supported in Snowflake")
+242            return super().except_op(expression)
+243
+244        def intersect_op(self, expression):
+245            if not expression.args.get("distinct", False):
+246                self.unsupported("INTERSECT with All is not supported in Snowflake")
+247            return super().intersect_op(expression)
+248
+249        def values_sql(self, expression: exp.Values) -> str:
+250            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.
+251
+252            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
+253            from adding quotes to the column by using the `identify` argument when generating the SQL.
+254            """
+255            alias = expression.args.get("alias")
+256            if alias and alias.args.get("columns"):
+257                expression = expression.transform(
+258                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+259                    if isinstance(node, exp.Identifier)
+260                    and isinstance(node.parent, exp.TableAlias)
+261                    and node.arg_key == "columns"
+262                    else node,
+263                )
+264                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
+265            return super().values_sql(expression)
+266
+267        def select_sql(self, expression: exp.Select) -> str:
+268            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
+269            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
+270            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
+271            generating the SQL.
+272
+273            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
+274            expression. This might not be true in a case where the same column name can be sourced from another table that can
+275            properly quote but should be true in most cases.
+276            """
+277            values_expressions = expression.find_all(exp.Values)
+278            values_identifiers = set(
+279                flatten(
+280                    v.args.get("alias", exp.Alias()).args.get("columns", [])
+281                    for v in values_expressions
+282                )
+283            )
+284            if values_identifiers:
+285                expression = expression.transform(
+286                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+287                    if isinstance(node, exp.Identifier) and node in values_identifiers
+288                    else node,
+289                )
+290                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
+291            return super().select_sql(expression)
+292
+293        def describe_sql(self, expression: exp.Describe) -> str:
+294            # Default to table if kind is unknown
+295            kind_value = expression.args.get("kind") or "TABLE"
+296            kind = f" {kind_value}" if kind_value else ""
+297            this = f" {self.sql(expression, 'this')}"
+298            return f"DESCRIBE{kind}{this}"
+299
+300        def generatedasidentitycolumnconstraint_sql(
+301            self, expression: exp.GeneratedAsIdentityColumnConstraint
+302        ) -> str:
+303            start = expression.args.get("start")
+304            start = f" START {start}" if start else ""
+305            increment = expression.args.get("increment")
+306            increment = f" INCREMENT {increment}" if increment else ""
+307            return f"AUTOINCREMENT{start}{increment}"
+
+ + +
+
+ +
+ + class + Snowflake(sqlglot.dialects.dialect.Dialect): + + + +
+ +
115class Snowflake(Dialect):
+116    null_ordering = "nulls_are_large"
+117    time_format = "'yyyy-mm-dd hh24:mi:ss'"
+118
+119    time_mapping = {
+120        "YYYY": "%Y",
+121        "yyyy": "%Y",
+122        "YY": "%y",
+123        "yy": "%y",
+124        "MMMM": "%B",
+125        "mmmm": "%B",
+126        "MON": "%b",
+127        "mon": "%b",
+128        "MM": "%m",
+129        "mm": "%m",
+130        "DD": "%d",
+131        "dd": "%d",
+132        "d": "%-d",
+133        "DY": "%w",
+134        "dy": "%w",
+135        "HH24": "%H",
+136        "hh24": "%H",
+137        "HH12": "%I",
+138        "hh12": "%I",
+139        "MI": "%M",
+140        "mi": "%M",
+141        "SS": "%S",
+142        "ss": "%S",
+143        "FF": "%f",
+144        "ff": "%f",
+145        "FF6": "%f",
+146        "ff6": "%f",
+147    }
+148
+149    class Parser(parser.Parser):
+150        FUNCTIONS = {
+151            **parser.Parser.FUNCTIONS,
+152            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
+153            "IFF": exp.If.from_arg_list,
+154            "TO_TIMESTAMP": _snowflake_to_timestamp,
+155            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
+156            "RLIKE": exp.RegexpLike.from_arg_list,
+157            "DECODE": exp.Matches.from_arg_list,
+158            "OBJECT_CONSTRUCT": parser.parse_var_map,
+159        }
+160
+161        FUNCTION_PARSERS = {
+162            **parser.Parser.FUNCTION_PARSERS,
+163            "DATE_PART": _parse_date_part,
+164        }
+165        FUNCTION_PARSERS.pop("TRIM")
+166
+167        FUNC_TOKENS = {
+168            *parser.Parser.FUNC_TOKENS,
+169            TokenType.RLIKE,
+170            TokenType.TABLE,
+171        }
+172
+173        COLUMN_OPERATORS = {
+174            **parser.Parser.COLUMN_OPERATORS,  # type: ignore
+175            TokenType.COLON: lambda self, this, path: self.expression(
+176                exp.Bracket,
+177                this=this,
+178                expressions=[path],
+179            ),
+180        }
+181
+182    class Tokenizer(tokens.Tokenizer):
+183        QUOTES = ["'", "$$"]
+184        STRING_ESCAPES = ["\\", "'"]
+185
+186        SINGLE_TOKENS = {
+187            **tokens.Tokenizer.SINGLE_TOKENS,
+188            "$": TokenType.PARAMETER,
+189        }
+190
+191        KEYWORDS = {
+192            **tokens.Tokenizer.KEYWORDS,
+193            "EXCLUDE": TokenType.EXCEPT,
+194            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+195            "PUT": TokenType.COMMAND,
+196            "RENAME": TokenType.REPLACE,
+197            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
+198            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
+199            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
+200            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
+201            "MINUS": TokenType.EXCEPT,
+202            "SAMPLE": TokenType.TABLE_SAMPLE,
+203        }
+204
+205    class Generator(generator.Generator):
+206        CREATE_TRANSIENT = True
+207
+208        TRANSFORMS = {
+209            **generator.Generator.TRANSFORMS,  # type: ignore
+210            exp.Array: inline_array_sql,
+211            exp.ArrayConcat: rename_func("ARRAY_CAT"),
+212            exp.DateAdd: rename_func("DATEADD"),
+213            exp.DateStrToDate: datestrtodate_sql,
+214            exp.DataType: _datatype_sql,
+215            exp.If: rename_func("IFF"),
+216            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+217            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+218            exp.Parameter: lambda self, e: f"${self.sql(e, 'this')}",
+219            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+220            exp.Matches: rename_func("DECODE"),
+221            exp.StrPosition: lambda self, e: f"{self.normalize_func('POSITION')}({self.format_args(e.args.get('substr'), e.this, e.args.get('position'))})",
+222            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+223            exp.TimeStrToTime: timestrtotime_sql,
+224            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
+225            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+226            exp.UnixToTime: _unix_to_time_sql,
+227            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+228        }
+229
+230        TYPE_MAPPING = {
+231            **generator.Generator.TYPE_MAPPING,  # type: ignore
+232            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
+233        }
+234
+235        STAR_MAPPING = {
+236            "except": "EXCLUDE",
+237            "replace": "RENAME",
+238        }
+239
+240        def except_op(self, expression):
+241            if not expression.args.get("distinct", False):
+242                self.unsupported("EXCEPT with All is not supported in Snowflake")
+243            return super().except_op(expression)
+244
+245        def intersect_op(self, expression):
+246            if not expression.args.get("distinct", False):
+247                self.unsupported("INTERSECT with All is not supported in Snowflake")
+248            return super().intersect_op(expression)
+249
+250        def values_sql(self, expression: exp.Values) -> str:
+251            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.
+252
+253            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
+254            from adding quotes to the column by using the `identify` argument when generating the SQL.
+255            """
+256            alias = expression.args.get("alias")
+257            if alias and alias.args.get("columns"):
+258                expression = expression.transform(
+259                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+260                    if isinstance(node, exp.Identifier)
+261                    and isinstance(node.parent, exp.TableAlias)
+262                    and node.arg_key == "columns"
+263                    else node,
+264                )
+265                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
+266            return super().values_sql(expression)
+267
+268        def select_sql(self, expression: exp.Select) -> str:
+269            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
+270            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
+271            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
+272            generating the SQL.
+273
+274            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
+275            expression. This might not be true in a case where the same column name can be sourced from another table that can
+276            properly quote but should be true in most cases.
+277            """
+278            values_expressions = expression.find_all(exp.Values)
+279            values_identifiers = set(
+280                flatten(
+281                    v.args.get("alias", exp.Alias()).args.get("columns", [])
+282                    for v in values_expressions
+283                )
+284            )
+285            if values_identifiers:
+286                expression = expression.transform(
+287                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+288                    if isinstance(node, exp.Identifier) and node in values_identifiers
+289                    else node,
+290                )
+291                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
+292            return super().select_sql(expression)
+293
+294        def describe_sql(self, expression: exp.Describe) -> str:
+295            # Default to table if kind is unknown
+296            kind_value = expression.args.get("kind") or "TABLE"
+297            kind = f" {kind_value}" if kind_value else ""
+298            this = f" {self.sql(expression, 'this')}"
+299            return f"DESCRIBE{kind}{this}"
+300
+301        def generatedasidentitycolumnconstraint_sql(
+302            self, expression: exp.GeneratedAsIdentityColumnConstraint
+303        ) -> str:
+304            start = expression.args.get("start")
+305            start = f" START {start}" if start else ""
+306            increment = expression.args.get("increment")
+307            increment = f" INCREMENT {increment}" if increment else ""
+308            return f"AUTOINCREMENT{start}{increment}"
+
+ + + + +
+
+ + Snowflake() + + +
+ + + + +
+ +
+
+ +
+ + class + Snowflake.Parser(sqlglot.parser.Parser): + + + +
+ +
149    class Parser(parser.Parser):
+150        FUNCTIONS = {
+151            **parser.Parser.FUNCTIONS,
+152            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
+153            "IFF": exp.If.from_arg_list,
+154            "TO_TIMESTAMP": _snowflake_to_timestamp,
+155            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
+156            "RLIKE": exp.RegexpLike.from_arg_list,
+157            "DECODE": exp.Matches.from_arg_list,
+158            "OBJECT_CONSTRUCT": parser.parse_var_map,
+159        }
+160
+161        FUNCTION_PARSERS = {
+162            **parser.Parser.FUNCTION_PARSERS,
+163            "DATE_PART": _parse_date_part,
+164        }
+165        FUNCTION_PARSERS.pop("TRIM")
+166
+167        FUNC_TOKENS = {
+168            *parser.Parser.FUNC_TOKENS,
+169            TokenType.RLIKE,
+170            TokenType.TABLE,
+171        }
+172
+173        COLUMN_OPERATORS = {
+174            **parser.Parser.COLUMN_OPERATORS,  # type: ignore
+175            TokenType.COLON: lambda self, this, path: self.expression(
+176                exp.Bracket,
+177                this=this,
+178                expressions=[path],
+179            ),
+180        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Snowflake.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
182    class Tokenizer(tokens.Tokenizer):
+183        QUOTES = ["'", "$$"]
+184        STRING_ESCAPES = ["\\", "'"]
+185
+186        SINGLE_TOKENS = {
+187            **tokens.Tokenizer.SINGLE_TOKENS,
+188            "$": TokenType.PARAMETER,
+189        }
+190
+191        KEYWORDS = {
+192            **tokens.Tokenizer.KEYWORDS,
+193            "EXCLUDE": TokenType.EXCEPT,
+194            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
+195            "PUT": TokenType.COMMAND,
+196            "RENAME": TokenType.REPLACE,
+197            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
+198            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
+199            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
+200            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
+201            "MINUS": TokenType.EXCEPT,
+202            "SAMPLE": TokenType.TABLE_SAMPLE,
+203        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Snowflake.Generator(sqlglot.generator.Generator): + + + +
+ +
205    class Generator(generator.Generator):
+206        CREATE_TRANSIENT = True
+207
+208        TRANSFORMS = {
+209            **generator.Generator.TRANSFORMS,  # type: ignore
+210            exp.Array: inline_array_sql,
+211            exp.ArrayConcat: rename_func("ARRAY_CAT"),
+212            exp.DateAdd: rename_func("DATEADD"),
+213            exp.DateStrToDate: datestrtodate_sql,
+214            exp.DataType: _datatype_sql,
+215            exp.If: rename_func("IFF"),
+216            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+217            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
+218            exp.Parameter: lambda self, e: f"${self.sql(e, 'this')}",
+219            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+220            exp.Matches: rename_func("DECODE"),
+221            exp.StrPosition: lambda self, e: f"{self.normalize_func('POSITION')}({self.format_args(e.args.get('substr'), e.this, e.args.get('position'))})",
+222            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+223            exp.TimeStrToTime: timestrtotime_sql,
+224            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
+225            exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})",
+226            exp.UnixToTime: _unix_to_time_sql,
+227            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+228        }
+229
+230        TYPE_MAPPING = {
+231            **generator.Generator.TYPE_MAPPING,  # type: ignore
+232            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
+233        }
+234
+235        STAR_MAPPING = {
+236            "except": "EXCLUDE",
+237            "replace": "RENAME",
+238        }
+239
+240        def except_op(self, expression):
+241            if not expression.args.get("distinct", False):
+242                self.unsupported("EXCEPT with All is not supported in Snowflake")
+243            return super().except_op(expression)
+244
+245        def intersect_op(self, expression):
+246            if not expression.args.get("distinct", False):
+247                self.unsupported("INTERSECT with All is not supported in Snowflake")
+248            return super().intersect_op(expression)
+249
+250        def values_sql(self, expression: exp.Values) -> str:
+251            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.
+252
+253            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
+254            from adding quotes to the column by using the `identify` argument when generating the SQL.
+255            """
+256            alias = expression.args.get("alias")
+257            if alias and alias.args.get("columns"):
+258                expression = expression.transform(
+259                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+260                    if isinstance(node, exp.Identifier)
+261                    and isinstance(node.parent, exp.TableAlias)
+262                    and node.arg_key == "columns"
+263                    else node,
+264                )
+265                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
+266            return super().values_sql(expression)
+267
+268        def select_sql(self, expression: exp.Select) -> str:
+269            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
+270            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
+271            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
+272            generating the SQL.
+273
+274            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
+275            expression. This might not be true in a case where the same column name can be sourced from another table that can
+276            properly quote but should be true in most cases.
+277            """
+278            values_expressions = expression.find_all(exp.Values)
+279            values_identifiers = set(
+280                flatten(
+281                    v.args.get("alias", exp.Alias()).args.get("columns", [])
+282                    for v in values_expressions
+283                )
+284            )
+285            if values_identifiers:
+286                expression = expression.transform(
+287                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+288                    if isinstance(node, exp.Identifier) and node in values_identifiers
+289                    else node,
+290                )
+291                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
+292            return super().select_sql(expression)
+293
+294        def describe_sql(self, expression: exp.Describe) -> str:
+295            # Default to table if kind is unknown
+296            kind_value = expression.args.get("kind") or "TABLE"
+297            kind = f" {kind_value}" if kind_value else ""
+298            this = f" {self.sql(expression, 'this')}"
+299            return f"DESCRIBE{kind}{this}"
+300
+301        def generatedasidentitycolumnconstraint_sql(
+302            self, expression: exp.GeneratedAsIdentityColumnConstraint
+303        ) -> str:
+304            start = expression.args.get("start")
+305            start = f" START {start}" if start else ""
+306            increment = expression.args.get("increment")
+307            increment = f" INCREMENT {increment}" if increment else ""
+308            return f"AUTOINCREMENT{start}{increment}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + except_op(self, expression): + + + +
+ +
240        def except_op(self, expression):
+241            if not expression.args.get("distinct", False):
+242                self.unsupported("EXCEPT with All is not supported in Snowflake")
+243            return super().except_op(expression)
+
+ + + + +
+
+ +
+ + def + intersect_op(self, expression): + + + +
+ +
245        def intersect_op(self, expression):
+246            if not expression.args.get("distinct", False):
+247                self.unsupported("INTERSECT with All is not supported in Snowflake")
+248            return super().intersect_op(expression)
+
+ + + + +
+
+ +
+ + def + values_sql(self, expression: sqlglot.expressions.Values) -> str: + + + +
+ +
250        def values_sql(self, expression: exp.Values) -> str:
+251            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.
+252
+253            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
+254            from adding quotes to the column by using the `identify` argument when generating the SQL.
+255            """
+256            alias = expression.args.get("alias")
+257            if alias and alias.args.get("columns"):
+258                expression = expression.transform(
+259                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+260                    if isinstance(node, exp.Identifier)
+261                    and isinstance(node.parent, exp.TableAlias)
+262                    and node.arg_key == "columns"
+263                    else node,
+264                )
+265                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
+266            return super().values_sql(expression)
+
+ + +

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.

+ +

We also want to make sure that after we find matches where we need to unquote a column that we prevent users +from adding quotes to the column by using the identify argument when generating the SQL.

+
+ + +
+
+ +
+ + def + select_sql(self, expression: sqlglot.expressions.Select) -> str: + + + +
+ +
268        def select_sql(self, expression: exp.Select) -> str:
+269            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
+270            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
+271            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
+272            generating the SQL.
+273
+274            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
+275            expression. This might not be true in a case where the same column name can be sourced from another table that can
+276            properly quote but should be true in most cases.
+277            """
+278            values_expressions = expression.find_all(exp.Values)
+279            values_identifiers = set(
+280                flatten(
+281                    v.args.get("alias", exp.Alias()).args.get("columns", [])
+282                    for v in values_expressions
+283                )
+284            )
+285            if values_identifiers:
+286                expression = expression.transform(
+287                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
+288                    if isinstance(node, exp.Identifier) and node in values_identifiers
+289                    else node,
+290                )
+291                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
+292            return super().select_sql(expression)
+
+ + +

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also +that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need +to unquote a column that we prevent users from adding quotes to the column by using the identify argument when +generating the SQL.

+ +

Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the +expression. This might not be true in a case where the same column name can be sourced from another table that can +properly quote but should be true in most cases.

+
+ + +
+
+ +
+ + def + describe_sql(self, expression: sqlglot.expressions.Describe) -> str: + + + +
+ +
294        def describe_sql(self, expression: exp.Describe) -> str:
+295            # Default to table if kind is unknown
+296            kind_value = expression.args.get("kind") or "TABLE"
+297            kind = f" {kind_value}" if kind_value else ""
+298            this = f" {self.sql(expression, 'this')}"
+299            return f"DESCRIBE{kind}{this}"
+
+ + + + +
+
+ +
+ + def + generatedasidentitycolumnconstraint_sql( self, expression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str: + + + +
+ +
301        def generatedasidentitycolumnconstraint_sql(
+302            self, expression: exp.GeneratedAsIdentityColumnConstraint
+303        ) -> str:
+304            start = expression.args.get("start")
+305            start = f" START {start}" if start else ""
+306            increment = expression.args.get("increment")
+307            increment = f" INCREMENT {increment}" if increment else ""
+308            return f"AUTOINCREMENT{start}{increment}"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/spark.html b/docs/sqlglot/dialects/spark.html new file mode 100644 index 0000000..75d5ac4 --- /dev/null +++ b/docs/sqlglot/dialects/spark.html @@ -0,0 +1,1136 @@ + + + + + + + sqlglot.dialects.spark API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.spark

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, parser
+  4from sqlglot.dialects.dialect import create_with_partitions_sql, rename_func, trim_sql
+  5from sqlglot.dialects.hive import Hive
+  6from sqlglot.helper import seq_get
+  7
+  8
+  9def _create_sql(self, e):
+ 10    kind = e.args.get("kind")
+ 11    temporary = e.args.get("temporary")
+ 12
+ 13    if kind.upper() == "TABLE" and temporary is True:
+ 14        return f"CREATE TEMPORARY VIEW {self.sql(e, 'this')} AS {self.sql(e, 'expression')}"
+ 15    return create_with_partitions_sql(self, e)
+ 16
+ 17
+ 18def _map_sql(self, expression):
+ 19    keys = self.sql(expression.args["keys"])
+ 20    values = self.sql(expression.args["values"])
+ 21    return f"MAP_FROM_ARRAYS({keys}, {values})"
+ 22
+ 23
+ 24def _str_to_date(self, expression):
+ 25    this = self.sql(expression, "this")
+ 26    time_format = self.format_time(expression)
+ 27    if time_format == Hive.date_format:
+ 28        return f"TO_DATE({this})"
+ 29    return f"TO_DATE({this}, {time_format})"
+ 30
+ 31
+ 32def _unix_to_time(self, expression):
+ 33    scale = expression.args.get("scale")
+ 34    timestamp = self.sql(expression, "this")
+ 35    if scale is None:
+ 36        return f"FROM_UNIXTIME({timestamp})"
+ 37    if scale == exp.UnixToTime.SECONDS:
+ 38        return f"TIMESTAMP_SECONDS({timestamp})"
+ 39    if scale == exp.UnixToTime.MILLIS:
+ 40        return f"TIMESTAMP_MILLIS({timestamp})"
+ 41    if scale == exp.UnixToTime.MICROS:
+ 42        return f"TIMESTAMP_MICROS({timestamp})"
+ 43
+ 44    raise ValueError("Improper scale for timestamp")
+ 45
+ 46
+ 47class Spark(Hive):
+ 48    class Parser(Hive.Parser):
+ 49        FUNCTIONS = {
+ 50            **Hive.Parser.FUNCTIONS,  # type: ignore
+ 51            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
+ 52            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
+ 53            "LEFT": lambda args: exp.Substring(
+ 54                this=seq_get(args, 0),
+ 55                start=exp.Literal.number(1),
+ 56                length=seq_get(args, 1),
+ 57            ),
+ 58            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
+ 59                this=seq_get(args, 0),
+ 60                expression=seq_get(args, 1),
+ 61            ),
+ 62            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
+ 63                this=seq_get(args, 0),
+ 64                expression=seq_get(args, 1),
+ 65            ),
+ 66            "RIGHT": lambda args: exp.Substring(
+ 67                this=seq_get(args, 0),
+ 68                start=exp.Sub(
+ 69                    this=exp.Length(this=seq_get(args, 0)),
+ 70                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
+ 71                ),
+ 72                length=seq_get(args, 1),
+ 73            ),
+ 74            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
+ 75            "IIF": exp.If.from_arg_list,
+ 76            "AGGREGATE": exp.Reduce.from_arg_list,
+ 77            "DAYOFWEEK": lambda args: exp.DayOfWeek(
+ 78                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 79            ),
+ 80            "DAYOFMONTH": lambda args: exp.DayOfMonth(
+ 81                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 82            ),
+ 83            "DAYOFYEAR": lambda args: exp.DayOfYear(
+ 84                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 85            ),
+ 86            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
+ 87                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 88            ),
+ 89        }
+ 90
+ 91        FUNCTION_PARSERS = {
+ 92            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+ 93            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
+ 94            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
+ 95            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
+ 96            "MERGE": lambda self: self._parse_join_hint("MERGE"),
+ 97            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
+ 98            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
+ 99            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
+100            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
+101        }
+102
+103        def _parse_add_column(self):
+104            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()
+105
+106        def _parse_drop_column(self):
+107            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
+108                exp.Drop,
+109                this=self._parse_schema(),
+110                kind="COLUMNS",
+111            )
+112
+113    class Generator(Hive.Generator):
+114        TYPE_MAPPING = {
+115            **Hive.Generator.TYPE_MAPPING,  # type: ignore
+116            exp.DataType.Type.TINYINT: "BYTE",
+117            exp.DataType.Type.SMALLINT: "SHORT",
+118            exp.DataType.Type.BIGINT: "LONG",
+119        }
+120
+121        PROPERTIES_LOCATION = {
+122            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
+123            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
+124            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
+125            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
+126            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
+127        }
+128
+129        TRANSFORMS = {
+130            **Hive.Generator.TRANSFORMS,  # type: ignore
+131            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+132            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
+133            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+134            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
+135            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
+136            exp.DateTrunc: rename_func("TRUNC"),
+137            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
+138            exp.StrToDate: _str_to_date,
+139            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+140            exp.UnixToTime: _unix_to_time,
+141            exp.Create: _create_sql,
+142            exp.Map: _map_sql,
+143            exp.Reduce: rename_func("AGGREGATE"),
+144            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
+145            exp.TimestampTrunc: lambda self, e: f"DATE_TRUNC({self.sql(e, 'unit')}, {self.sql(e, 'this')})",
+146            exp.Trim: trim_sql,
+147            exp.VariancePop: rename_func("VAR_POP"),
+148            exp.DateFromParts: rename_func("MAKE_DATE"),
+149            exp.LogicalOr: rename_func("BOOL_OR"),
+150            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+151            exp.DayOfMonth: rename_func("DAYOFMONTH"),
+152            exp.DayOfYear: rename_func("DAYOFYEAR"),
+153            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
+154            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
+155        }
+156        TRANSFORMS.pop(exp.ArraySort)
+157        TRANSFORMS.pop(exp.ILike)
+158
+159        WRAP_DERIVED_VALUES = False
+160        CREATE_FUNCTION_AS = False
+161
+162        def cast_sql(self, expression: exp.Cast) -> str:
+163            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
+164                exp.DataType.Type.JSON
+165            ):
+166                schema = f"'{self.sql(expression, 'to')}'"
+167                return f"FROM_JSON({self.format_args(self.sql(expression.this, 'this'), schema)})"
+168            if expression.to.is_type(exp.DataType.Type.JSON):
+169                return f"TO_JSON({self.sql(expression, 'this')})"
+170
+171            return super(Spark.Generator, self).cast_sql(expression)
+172
+173    class Tokenizer(Hive.Tokenizer):
+174        HEX_STRINGS = [("X'", "'")]
+
+ + +
+
+ +
+ + class + Spark(sqlglot.dialects.hive.Hive): + + + +
+ +
 48class Spark(Hive):
+ 49    class Parser(Hive.Parser):
+ 50        FUNCTIONS = {
+ 51            **Hive.Parser.FUNCTIONS,  # type: ignore
+ 52            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
+ 53            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
+ 54            "LEFT": lambda args: exp.Substring(
+ 55                this=seq_get(args, 0),
+ 56                start=exp.Literal.number(1),
+ 57                length=seq_get(args, 1),
+ 58            ),
+ 59            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
+ 60                this=seq_get(args, 0),
+ 61                expression=seq_get(args, 1),
+ 62            ),
+ 63            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
+ 64                this=seq_get(args, 0),
+ 65                expression=seq_get(args, 1),
+ 66            ),
+ 67            "RIGHT": lambda args: exp.Substring(
+ 68                this=seq_get(args, 0),
+ 69                start=exp.Sub(
+ 70                    this=exp.Length(this=seq_get(args, 0)),
+ 71                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
+ 72                ),
+ 73                length=seq_get(args, 1),
+ 74            ),
+ 75            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
+ 76            "IIF": exp.If.from_arg_list,
+ 77            "AGGREGATE": exp.Reduce.from_arg_list,
+ 78            "DAYOFWEEK": lambda args: exp.DayOfWeek(
+ 79                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 80            ),
+ 81            "DAYOFMONTH": lambda args: exp.DayOfMonth(
+ 82                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 83            ),
+ 84            "DAYOFYEAR": lambda args: exp.DayOfYear(
+ 85                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 86            ),
+ 87            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
+ 88                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 89            ),
+ 90        }
+ 91
+ 92        FUNCTION_PARSERS = {
+ 93            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+ 94            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
+ 95            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
+ 96            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
+ 97            "MERGE": lambda self: self._parse_join_hint("MERGE"),
+ 98            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
+ 99            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
+100            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
+101            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
+102        }
+103
+104        def _parse_add_column(self):
+105            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()
+106
+107        def _parse_drop_column(self):
+108            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
+109                exp.Drop,
+110                this=self._parse_schema(),
+111                kind="COLUMNS",
+112            )
+113
+114    class Generator(Hive.Generator):
+115        TYPE_MAPPING = {
+116            **Hive.Generator.TYPE_MAPPING,  # type: ignore
+117            exp.DataType.Type.TINYINT: "BYTE",
+118            exp.DataType.Type.SMALLINT: "SHORT",
+119            exp.DataType.Type.BIGINT: "LONG",
+120        }
+121
+122        PROPERTIES_LOCATION = {
+123            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
+124            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
+125            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
+126            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
+127            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
+128        }
+129
+130        TRANSFORMS = {
+131            **Hive.Generator.TRANSFORMS,  # type: ignore
+132            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+133            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
+134            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+135            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
+136            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
+137            exp.DateTrunc: rename_func("TRUNC"),
+138            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
+139            exp.StrToDate: _str_to_date,
+140            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+141            exp.UnixToTime: _unix_to_time,
+142            exp.Create: _create_sql,
+143            exp.Map: _map_sql,
+144            exp.Reduce: rename_func("AGGREGATE"),
+145            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
+146            exp.TimestampTrunc: lambda self, e: f"DATE_TRUNC({self.sql(e, 'unit')}, {self.sql(e, 'this')})",
+147            exp.Trim: trim_sql,
+148            exp.VariancePop: rename_func("VAR_POP"),
+149            exp.DateFromParts: rename_func("MAKE_DATE"),
+150            exp.LogicalOr: rename_func("BOOL_OR"),
+151            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+152            exp.DayOfMonth: rename_func("DAYOFMONTH"),
+153            exp.DayOfYear: rename_func("DAYOFYEAR"),
+154            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
+155            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
+156        }
+157        TRANSFORMS.pop(exp.ArraySort)
+158        TRANSFORMS.pop(exp.ILike)
+159
+160        WRAP_DERIVED_VALUES = False
+161        CREATE_FUNCTION_AS = False
+162
+163        def cast_sql(self, expression: exp.Cast) -> str:
+164            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
+165                exp.DataType.Type.JSON
+166            ):
+167                schema = f"'{self.sql(expression, 'to')}'"
+168                return f"FROM_JSON({self.format_args(self.sql(expression.this, 'this'), schema)})"
+169            if expression.to.is_type(exp.DataType.Type.JSON):
+170                return f"TO_JSON({self.sql(expression, 'this')})"
+171
+172            return super(Spark.Generator, self).cast_sql(expression)
+173
+174    class Tokenizer(Hive.Tokenizer):
+175        HEX_STRINGS = [("X'", "'")]
+
+ + + + +
+
+ + Spark() + + +
+ + + + +
+ +
+
+ +
+ + class + Spark.Parser(sqlglot.dialects.hive.Hive.Parser): + + + +
+ +
 49    class Parser(Hive.Parser):
+ 50        FUNCTIONS = {
+ 51            **Hive.Parser.FUNCTIONS,  # type: ignore
+ 52            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
+ 53            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
+ 54            "LEFT": lambda args: exp.Substring(
+ 55                this=seq_get(args, 0),
+ 56                start=exp.Literal.number(1),
+ 57                length=seq_get(args, 1),
+ 58            ),
+ 59            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
+ 60                this=seq_get(args, 0),
+ 61                expression=seq_get(args, 1),
+ 62            ),
+ 63            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
+ 64                this=seq_get(args, 0),
+ 65                expression=seq_get(args, 1),
+ 66            ),
+ 67            "RIGHT": lambda args: exp.Substring(
+ 68                this=seq_get(args, 0),
+ 69                start=exp.Sub(
+ 70                    this=exp.Length(this=seq_get(args, 0)),
+ 71                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
+ 72                ),
+ 73                length=seq_get(args, 1),
+ 74            ),
+ 75            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
+ 76            "IIF": exp.If.from_arg_list,
+ 77            "AGGREGATE": exp.Reduce.from_arg_list,
+ 78            "DAYOFWEEK": lambda args: exp.DayOfWeek(
+ 79                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 80            ),
+ 81            "DAYOFMONTH": lambda args: exp.DayOfMonth(
+ 82                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 83            ),
+ 84            "DAYOFYEAR": lambda args: exp.DayOfYear(
+ 85                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 86            ),
+ 87            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
+ 88                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
+ 89            ),
+ 90        }
+ 91
+ 92        FUNCTION_PARSERS = {
+ 93            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+ 94            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
+ 95            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
+ 96            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
+ 97            "MERGE": lambda self: self._parse_join_hint("MERGE"),
+ 98            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
+ 99            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
+100            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
+101            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
+102        }
+103
+104        def _parse_add_column(self):
+105            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()
+106
+107        def _parse_drop_column(self):
+108            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
+109                exp.Drop,
+110                this=self._parse_schema(),
+111                kind="COLUMNS",
+112            )
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Spark.Generator(sqlglot.dialects.hive.Hive.Generator): + + + +
+ +
114    class Generator(Hive.Generator):
+115        TYPE_MAPPING = {
+116            **Hive.Generator.TYPE_MAPPING,  # type: ignore
+117            exp.DataType.Type.TINYINT: "BYTE",
+118            exp.DataType.Type.SMALLINT: "SHORT",
+119            exp.DataType.Type.BIGINT: "LONG",
+120        }
+121
+122        PROPERTIES_LOCATION = {
+123            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
+124            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
+125            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
+126            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
+127            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
+128        }
+129
+130        TRANSFORMS = {
+131            **Hive.Generator.TRANSFORMS,  # type: ignore
+132            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+133            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
+134            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+135            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
+136            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
+137            exp.DateTrunc: rename_func("TRUNC"),
+138            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
+139            exp.StrToDate: _str_to_date,
+140            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+141            exp.UnixToTime: _unix_to_time,
+142            exp.Create: _create_sql,
+143            exp.Map: _map_sql,
+144            exp.Reduce: rename_func("AGGREGATE"),
+145            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
+146            exp.TimestampTrunc: lambda self, e: f"DATE_TRUNC({self.sql(e, 'unit')}, {self.sql(e, 'this')})",
+147            exp.Trim: trim_sql,
+148            exp.VariancePop: rename_func("VAR_POP"),
+149            exp.DateFromParts: rename_func("MAKE_DATE"),
+150            exp.LogicalOr: rename_func("BOOL_OR"),
+151            exp.DayOfWeek: rename_func("DAYOFWEEK"),
+152            exp.DayOfMonth: rename_func("DAYOFMONTH"),
+153            exp.DayOfYear: rename_func("DAYOFYEAR"),
+154            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
+155            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
+156        }
+157        TRANSFORMS.pop(exp.ArraySort)
+158        TRANSFORMS.pop(exp.ILike)
+159
+160        WRAP_DERIVED_VALUES = False
+161        CREATE_FUNCTION_AS = False
+162
+163        def cast_sql(self, expression: exp.Cast) -> str:
+164            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
+165                exp.DataType.Type.JSON
+166            ):
+167                schema = f"'{self.sql(expression, 'to')}'"
+168                return f"FROM_JSON({self.format_args(self.sql(expression.this, 'this'), schema)})"
+169            if expression.to.is_type(exp.DataType.Type.JSON):
+170                return f"TO_JSON({self.sql(expression, 'this')})"
+171
+172            return super(Spark.Generator, self).cast_sql(expression)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + cast_sql(self, expression: sqlglot.expressions.Cast) -> str: + + + +
+ +
163        def cast_sql(self, expression: exp.Cast) -> str:
+164            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
+165                exp.DataType.Type.JSON
+166            ):
+167                schema = f"'{self.sql(expression, 'to')}'"
+168                return f"FROM_JSON({self.format_args(self.sql(expression.this, 'this'), schema)})"
+169            if expression.to.is_type(exp.DataType.Type.JSON):
+170                return f"TO_JSON({self.sql(expression, 'this')})"
+171
+172            return super(Spark.Generator, self).cast_sql(expression)
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+ +
+
+
+
+ +
+ + class + Spark.Tokenizer(sqlglot.dialects.hive.Hive.Tokenizer): + + + +
+ +
174    class Tokenizer(Hive.Tokenizer):
+175        HEX_STRINGS = [("X'", "'")]
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/sqlite.html b/docs/sqlglot/dialects/sqlite.html new file mode 100644 index 0000000..20314c5 --- /dev/null +++ b/docs/sqlglot/dialects/sqlite.html @@ -0,0 +1,918 @@ + + + + + + + sqlglot.dialects.sqlite API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.sqlite

+ + + + + + +
  1from __future__ import annotations
+  2
+  3from sqlglot import exp, generator, parser, tokens
+  4from sqlglot.dialects.dialect import (
+  5    Dialect,
+  6    arrow_json_extract_scalar_sql,
+  7    arrow_json_extract_sql,
+  8    no_ilike_sql,
+  9    no_tablesample_sql,
+ 10    no_trycast_sql,
+ 11    rename_func,
+ 12)
+ 13from sqlglot.tokens import TokenType
+ 14
+ 15
+ 16def _fetch_sql(self, expression):
+ 17    return self.limit_sql(exp.Limit(expression=expression.args.get("count")))
+ 18
+ 19
+ 20# https://www.sqlite.org/lang_aggfunc.html#group_concat
+ 21def _group_concat_sql(self, expression):
+ 22    this = expression.this
+ 23    distinct = expression.find(exp.Distinct)
+ 24    if distinct:
+ 25        this = distinct.expressions[0]
+ 26        distinct = "DISTINCT "
+ 27
+ 28    if isinstance(expression.this, exp.Order):
+ 29        self.unsupported("SQLite GROUP_CONCAT doesn't support ORDER BY.")
+ 30        if expression.this.this and not distinct:
+ 31            this = expression.this.this
+ 32
+ 33    separator = expression.args.get("separator")
+ 34    return f"GROUP_CONCAT({distinct or ''}{self.format_args(this, separator)})"
+ 35
+ 36
+ 37def _date_add_sql(self, expression):
+ 38    modifier = expression.expression
+ 39    modifier = expression.name if modifier.is_string else self.sql(modifier)
+ 40    unit = expression.args.get("unit")
+ 41    modifier = f"'{modifier} {unit.name}'" if unit else f"'{modifier}'"
+ 42    return f"{self.normalize_func('DATE')}({self.format_args(expression.this, modifier)})"
+ 43
+ 44
+ 45class SQLite(Dialect):
+ 46    class Tokenizer(tokens.Tokenizer):
+ 47        IDENTIFIERS = ['"', ("[", "]"), "`"]
+ 48        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
+ 49
+ 50        KEYWORDS = {
+ 51            **tokens.Tokenizer.KEYWORDS,
+ 52        }
+ 53
+ 54    class Parser(parser.Parser):
+ 55        FUNCTIONS = {
+ 56            **parser.Parser.FUNCTIONS,  # type: ignore
+ 57            "EDITDIST3": exp.Levenshtein.from_arg_list,
+ 58        }
+ 59
+ 60    class Generator(generator.Generator):
+ 61        TYPE_MAPPING = {
+ 62            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 63            exp.DataType.Type.BOOLEAN: "INTEGER",
+ 64            exp.DataType.Type.TINYINT: "INTEGER",
+ 65            exp.DataType.Type.SMALLINT: "INTEGER",
+ 66            exp.DataType.Type.INT: "INTEGER",
+ 67            exp.DataType.Type.BIGINT: "INTEGER",
+ 68            exp.DataType.Type.FLOAT: "REAL",
+ 69            exp.DataType.Type.DOUBLE: "REAL",
+ 70            exp.DataType.Type.DECIMAL: "REAL",
+ 71            exp.DataType.Type.CHAR: "TEXT",
+ 72            exp.DataType.Type.NCHAR: "TEXT",
+ 73            exp.DataType.Type.VARCHAR: "TEXT",
+ 74            exp.DataType.Type.NVARCHAR: "TEXT",
+ 75            exp.DataType.Type.BINARY: "BLOB",
+ 76            exp.DataType.Type.VARBINARY: "BLOB",
+ 77        }
+ 78
+ 79        TOKEN_MAPPING = {
+ 80            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
+ 81        }
+ 82
+ 83        TRANSFORMS = {
+ 84            **generator.Generator.TRANSFORMS,  # type: ignore
+ 85            exp.DateAdd: _date_add_sql,
+ 86            exp.ILike: no_ilike_sql,
+ 87            exp.JSONExtract: arrow_json_extract_sql,
+ 88            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+ 89            exp.JSONBExtract: arrow_json_extract_sql,
+ 90            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+ 91            exp.Levenshtein: rename_func("EDITDIST3"),
+ 92            exp.TableSample: no_tablesample_sql,
+ 93            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
+ 94            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
+ 95            exp.TryCast: no_trycast_sql,
+ 96            exp.GroupConcat: _group_concat_sql,
+ 97            exp.Fetch: _fetch_sql,
+ 98        }
+ 99
+100        def transaction_sql(self, expression):
+101            this = expression.this
+102            this = f" {this}" if this else ""
+103            return f"BEGIN{this} TRANSACTION"
+
+ + +
+
+ +
+ + class + SQLite(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 46class SQLite(Dialect):
+ 47    class Tokenizer(tokens.Tokenizer):
+ 48        IDENTIFIERS = ['"', ("[", "]"), "`"]
+ 49        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
+ 50
+ 51        KEYWORDS = {
+ 52            **tokens.Tokenizer.KEYWORDS,
+ 53        }
+ 54
+ 55    class Parser(parser.Parser):
+ 56        FUNCTIONS = {
+ 57            **parser.Parser.FUNCTIONS,  # type: ignore
+ 58            "EDITDIST3": exp.Levenshtein.from_arg_list,
+ 59        }
+ 60
+ 61    class Generator(generator.Generator):
+ 62        TYPE_MAPPING = {
+ 63            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 64            exp.DataType.Type.BOOLEAN: "INTEGER",
+ 65            exp.DataType.Type.TINYINT: "INTEGER",
+ 66            exp.DataType.Type.SMALLINT: "INTEGER",
+ 67            exp.DataType.Type.INT: "INTEGER",
+ 68            exp.DataType.Type.BIGINT: "INTEGER",
+ 69            exp.DataType.Type.FLOAT: "REAL",
+ 70            exp.DataType.Type.DOUBLE: "REAL",
+ 71            exp.DataType.Type.DECIMAL: "REAL",
+ 72            exp.DataType.Type.CHAR: "TEXT",
+ 73            exp.DataType.Type.NCHAR: "TEXT",
+ 74            exp.DataType.Type.VARCHAR: "TEXT",
+ 75            exp.DataType.Type.NVARCHAR: "TEXT",
+ 76            exp.DataType.Type.BINARY: "BLOB",
+ 77            exp.DataType.Type.VARBINARY: "BLOB",
+ 78        }
+ 79
+ 80        TOKEN_MAPPING = {
+ 81            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
+ 82        }
+ 83
+ 84        TRANSFORMS = {
+ 85            **generator.Generator.TRANSFORMS,  # type: ignore
+ 86            exp.DateAdd: _date_add_sql,
+ 87            exp.ILike: no_ilike_sql,
+ 88            exp.JSONExtract: arrow_json_extract_sql,
+ 89            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+ 90            exp.JSONBExtract: arrow_json_extract_sql,
+ 91            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+ 92            exp.Levenshtein: rename_func("EDITDIST3"),
+ 93            exp.TableSample: no_tablesample_sql,
+ 94            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
+ 95            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
+ 96            exp.TryCast: no_trycast_sql,
+ 97            exp.GroupConcat: _group_concat_sql,
+ 98            exp.Fetch: _fetch_sql,
+ 99        }
+100
+101        def transaction_sql(self, expression):
+102            this = expression.this
+103            this = f" {this}" if this else ""
+104            return f"BEGIN{this} TRANSACTION"
+
+ + + + +
+
+ + SQLite() + + +
+ + + + +
+ +
+
+ +
+ + class + SQLite.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
47    class Tokenizer(tokens.Tokenizer):
+48        IDENTIFIERS = ['"', ("[", "]"), "`"]
+49        HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", ""), ("0X", "")]
+50
+51        KEYWORDS = {
+52            **tokens.Tokenizer.KEYWORDS,
+53        }
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + SQLite.Parser(sqlglot.parser.Parser): + + + +
+ +
55    class Parser(parser.Parser):
+56        FUNCTIONS = {
+57            **parser.Parser.FUNCTIONS,  # type: ignore
+58            "EDITDIST3": exp.Levenshtein.from_arg_list,
+59        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + SQLite.Generator(sqlglot.generator.Generator): + + + +
+ +
 61    class Generator(generator.Generator):
+ 62        TYPE_MAPPING = {
+ 63            **generator.Generator.TYPE_MAPPING,  # type: ignore
+ 64            exp.DataType.Type.BOOLEAN: "INTEGER",
+ 65            exp.DataType.Type.TINYINT: "INTEGER",
+ 66            exp.DataType.Type.SMALLINT: "INTEGER",
+ 67            exp.DataType.Type.INT: "INTEGER",
+ 68            exp.DataType.Type.BIGINT: "INTEGER",
+ 69            exp.DataType.Type.FLOAT: "REAL",
+ 70            exp.DataType.Type.DOUBLE: "REAL",
+ 71            exp.DataType.Type.DECIMAL: "REAL",
+ 72            exp.DataType.Type.CHAR: "TEXT",
+ 73            exp.DataType.Type.NCHAR: "TEXT",
+ 74            exp.DataType.Type.VARCHAR: "TEXT",
+ 75            exp.DataType.Type.NVARCHAR: "TEXT",
+ 76            exp.DataType.Type.BINARY: "BLOB",
+ 77            exp.DataType.Type.VARBINARY: "BLOB",
+ 78        }
+ 79
+ 80        TOKEN_MAPPING = {
+ 81            TokenType.AUTO_INCREMENT: "AUTOINCREMENT",
+ 82        }
+ 83
+ 84        TRANSFORMS = {
+ 85            **generator.Generator.TRANSFORMS,  # type: ignore
+ 86            exp.DateAdd: _date_add_sql,
+ 87            exp.ILike: no_ilike_sql,
+ 88            exp.JSONExtract: arrow_json_extract_sql,
+ 89            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+ 90            exp.JSONBExtract: arrow_json_extract_sql,
+ 91            exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
+ 92            exp.Levenshtein: rename_func("EDITDIST3"),
+ 93            exp.TableSample: no_tablesample_sql,
+ 94            exp.DateStrToDate: lambda self, e: self.sql(e, "this"),
+ 95            exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
+ 96            exp.TryCast: no_trycast_sql,
+ 97            exp.GroupConcat: _group_concat_sql,
+ 98            exp.Fetch: _fetch_sql,
+ 99        }
+100
+101        def transaction_sql(self, expression):
+102            this = expression.this
+103            this = f" {this}" if this else ""
+104            return f"BEGIN{this} TRANSACTION"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + transaction_sql(self, expression): + + + +
+ +
101        def transaction_sql(self, expression):
+102            this = expression.this
+103            this = f" {this}" if this else ""
+104            return f"BEGIN{this} TRANSACTION"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/starrocks.html b/docs/sqlglot/dialects/starrocks.html new file mode 100644 index 0000000..208c6dc --- /dev/null +++ b/docs/sqlglot/dialects/starrocks.html @@ -0,0 +1,658 @@ + + + + + + + sqlglot.dialects.starrocks API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.starrocks

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3from sqlglot import exp
+ 4from sqlglot.dialects.dialect import arrow_json_extract_sql, rename_func
+ 5from sqlglot.dialects.mysql import MySQL
+ 6
+ 7
+ 8class StarRocks(MySQL):
+ 9    class Generator(MySQL.Generator):  # type: ignore
+10        TYPE_MAPPING = {
+11            **MySQL.Generator.TYPE_MAPPING,  # type: ignore
+12            exp.DataType.Type.TEXT: "STRING",
+13            exp.DataType.Type.TIMESTAMP: "DATETIME",
+14            exp.DataType.Type.TIMESTAMPTZ: "DATETIME",
+15        }
+16
+17        TRANSFORMS = {
+18            **MySQL.Generator.TRANSFORMS,  # type: ignore
+19            exp.JSONExtractScalar: arrow_json_extract_sql,
+20            exp.JSONExtract: arrow_json_extract_sql,
+21            exp.DateDiff: rename_func("DATEDIFF"),
+22            exp.StrToUnix: lambda self, e: f"UNIX_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+23            exp.TimeStrToDate: rename_func("TO_DATE"),
+24            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+25            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+26        }
+27        TRANSFORMS.pop(exp.DateTrunc)
+
+ + +
+
+ +
+ + class + StarRocks(sqlglot.dialects.mysql.MySQL): + + + +
+ +
 9class StarRocks(MySQL):
+10    class Generator(MySQL.Generator):  # type: ignore
+11        TYPE_MAPPING = {
+12            **MySQL.Generator.TYPE_MAPPING,  # type: ignore
+13            exp.DataType.Type.TEXT: "STRING",
+14            exp.DataType.Type.TIMESTAMP: "DATETIME",
+15            exp.DataType.Type.TIMESTAMPTZ: "DATETIME",
+16        }
+17
+18        TRANSFORMS = {
+19            **MySQL.Generator.TRANSFORMS,  # type: ignore
+20            exp.JSONExtractScalar: arrow_json_extract_sql,
+21            exp.JSONExtract: arrow_json_extract_sql,
+22            exp.DateDiff: rename_func("DATEDIFF"),
+23            exp.StrToUnix: lambda self, e: f"UNIX_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+24            exp.TimeStrToDate: rename_func("TO_DATE"),
+25            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+26            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+27        }
+28        TRANSFORMS.pop(exp.DateTrunc)
+
+ + + + +
+
+ + StarRocks() + + +
+ + + + +
+ +
+
+ +
+ + class + StarRocks.Generator(sqlglot.dialects.mysql.MySQL.Generator): + + + +
+ +
10    class Generator(MySQL.Generator):  # type: ignore
+11        TYPE_MAPPING = {
+12            **MySQL.Generator.TYPE_MAPPING,  # type: ignore
+13            exp.DataType.Type.TEXT: "STRING",
+14            exp.DataType.Type.TIMESTAMP: "DATETIME",
+15            exp.DataType.Type.TIMESTAMPTZ: "DATETIME",
+16        }
+17
+18        TRANSFORMS = {
+19            **MySQL.Generator.TRANSFORMS,  # type: ignore
+20            exp.JSONExtractScalar: arrow_json_extract_sql,
+21            exp.JSONExtract: arrow_json_extract_sql,
+22            exp.DateDiff: rename_func("DATEDIFF"),
+23            exp.StrToUnix: lambda self, e: f"UNIX_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+24            exp.TimeStrToDate: rename_func("TO_DATE"),
+25            exp.UnixToStr: lambda self, e: f"FROM_UNIXTIME({self.sql(e, 'this')}, {self.format_time(e)})",
+26            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
+27        }
+28        TRANSFORMS.pop(exp.DateTrunc)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/tableau.html b/docs/sqlglot/dialects/tableau.html new file mode 100644 index 0000000..ce9359e --- /dev/null +++ b/docs/sqlglot/dialects/tableau.html @@ -0,0 +1,704 @@ + + + + + + + sqlglot.dialects.tableau API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.tableau

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3from sqlglot import exp, generator, parser
+ 4from sqlglot.dialects.dialect import Dialect
+ 5
+ 6
+ 7def _if_sql(self, expression):
+ 8    return f"IF {self.sql(expression, 'this')} THEN {self.sql(expression, 'true')} ELSE {self.sql(expression, 'false')} END"
+ 9
+10
+11def _coalesce_sql(self, expression):
+12    return f"IFNULL({self.sql(expression, 'this')}, {self.expressions(expression)})"
+13
+14
+15def _count_sql(self, expression):
+16    this = expression.this
+17    if isinstance(this, exp.Distinct):
+18        return f"COUNTD({self.expressions(this, flat=True)})"
+19    return f"COUNT({self.sql(expression, 'this')})"
+20
+21
+22class Tableau(Dialect):
+23    class Generator(generator.Generator):
+24        TRANSFORMS = {
+25            **generator.Generator.TRANSFORMS,  # type: ignore
+26            exp.If: _if_sql,
+27            exp.Coalesce: _coalesce_sql,
+28            exp.Count: _count_sql,
+29        }
+30
+31    class Parser(parser.Parser):
+32        FUNCTIONS = {
+33            **parser.Parser.FUNCTIONS,  # type: ignore
+34            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
+35        }
+
+ + +
+
+ +
+ + class + Tableau(sqlglot.dialects.dialect.Dialect): + + + +
+ +
23class Tableau(Dialect):
+24    class Generator(generator.Generator):
+25        TRANSFORMS = {
+26            **generator.Generator.TRANSFORMS,  # type: ignore
+27            exp.If: _if_sql,
+28            exp.Coalesce: _coalesce_sql,
+29            exp.Count: _count_sql,
+30        }
+31
+32    class Parser(parser.Parser):
+33        FUNCTIONS = {
+34            **parser.Parser.FUNCTIONS,  # type: ignore
+35            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
+36        }
+
+ + + + +
+
+ + Tableau() + + +
+ + + + +
+ +
+
+ +
+ + class + Tableau.Generator(sqlglot.generator.Generator): + + + +
+ +
24    class Generator(generator.Generator):
+25        TRANSFORMS = {
+26            **generator.Generator.TRANSFORMS,  # type: ignore
+27            exp.If: _if_sql,
+28            exp.Coalesce: _coalesce_sql,
+29            exp.Count: _count_sql,
+30        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ +
+ + class + Tableau.Parser(sqlglot.parser.Parser): + + + +
+ +
32    class Parser(parser.Parser):
+33        FUNCTIONS = {
+34            **parser.Parser.FUNCTIONS,  # type: ignore
+35            "COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
+36        }
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/teradata.html b/docs/sqlglot/dialects/teradata.html new file mode 100644 index 0000000..ab5dfd2 --- /dev/null +++ b/docs/sqlglot/dialects/teradata.html @@ -0,0 +1,960 @@ + + + + + + + sqlglot.dialects.teradata API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.teradata

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3from sqlglot import exp, generator, parser
+ 4from sqlglot.dialects.dialect import Dialect
+ 5from sqlglot.tokens import TokenType
+ 6
+ 7
+ 8class Teradata(Dialect):
+ 9    class Parser(parser.Parser):
+10        CHARSET_TRANSLATORS = {
+11            "GRAPHIC_TO_KANJISJIS",
+12            "GRAPHIC_TO_LATIN",
+13            "GRAPHIC_TO_UNICODE",
+14            "GRAPHIC_TO_UNICODE_PadSpace",
+15            "KANJI1_KanjiEBCDIC_TO_UNICODE",
+16            "KANJI1_KanjiEUC_TO_UNICODE",
+17            "KANJI1_KANJISJIS_TO_UNICODE",
+18            "KANJI1_SBC_TO_UNICODE",
+19            "KANJISJIS_TO_GRAPHIC",
+20            "KANJISJIS_TO_LATIN",
+21            "KANJISJIS_TO_UNICODE",
+22            "LATIN_TO_GRAPHIC",
+23            "LATIN_TO_KANJISJIS",
+24            "LATIN_TO_UNICODE",
+25            "LOCALE_TO_UNICODE",
+26            "UNICODE_TO_GRAPHIC",
+27            "UNICODE_TO_GRAPHIC_PadGraphic",
+28            "UNICODE_TO_GRAPHIC_VarGraphic",
+29            "UNICODE_TO_KANJI1_KanjiEBCDIC",
+30            "UNICODE_TO_KANJI1_KanjiEUC",
+31            "UNICODE_TO_KANJI1_KANJISJIS",
+32            "UNICODE_TO_KANJI1_SBC",
+33            "UNICODE_TO_KANJISJIS",
+34            "UNICODE_TO_LATIN",
+35            "UNICODE_TO_LOCALE",
+36            "UNICODE_TO_UNICODE_FoldSpace",
+37            "UNICODE_TO_UNICODE_Fullwidth",
+38            "UNICODE_TO_UNICODE_Halfwidth",
+39            "UNICODE_TO_UNICODE_NFC",
+40            "UNICODE_TO_UNICODE_NFD",
+41            "UNICODE_TO_UNICODE_NFKC",
+42            "UNICODE_TO_UNICODE_NFKD",
+43        }
+44
+45        FUNCTION_PARSERS = {
+46            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+47            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
+48        }
+49
+50        def _parse_translate(self, strict: bool) -> exp.Expression:
+51            this = self._parse_conjunction()
+52
+53            if not self._match(TokenType.USING):
+54                self.raise_error("Expected USING in TRANSLATE")
+55
+56            if self._match_texts(self.CHARSET_TRANSLATORS):
+57                charset_split = self._prev.text.split("_TO_")
+58                to = self.expression(exp.CharacterSet, this=charset_split[1])
+59            else:
+60                self.raise_error("Expected a character set translator after USING in TRANSLATE")
+61
+62            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+63
+64        # FROM before SET in Teradata UPDATE syntax
+65        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+66        def _parse_update(self) -> exp.Expression:
+67            return self.expression(
+68                exp.Update,
+69                **{  # type: ignore
+70                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
+71                    "from": self._parse_from(),
+72                    "expressions": self._match(TokenType.SET)
+73                    and self._parse_csv(self._parse_equality),
+74                    "where": self._parse_where(),
+75                },
+76            )
+77
+78    class Generator(generator.Generator):
+79        PROPERTIES_LOCATION = {
+80            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+81            exp.PartitionedByProperty: exp.Properties.Location.POST_INDEX,
+82        }
+83
+84        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
+85            return f"PARTITION BY {self.sql(expression, 'this')}"
+86
+87        # FROM before SET in Teradata UPDATE syntax
+88        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+89        def update_sql(self, expression: exp.Update) -> str:
+90            this = self.sql(expression, "this")
+91            from_sql = self.sql(expression, "from")
+92            set_sql = self.expressions(expression, flat=True)
+93            where_sql = self.sql(expression, "where")
+94            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
+95            return self.prepend_ctes(expression, sql)
+
+ + +
+
+ +
+ + class + Teradata(sqlglot.dialects.dialect.Dialect): + + + +
+ +
 9class Teradata(Dialect):
+10    class Parser(parser.Parser):
+11        CHARSET_TRANSLATORS = {
+12            "GRAPHIC_TO_KANJISJIS",
+13            "GRAPHIC_TO_LATIN",
+14            "GRAPHIC_TO_UNICODE",
+15            "GRAPHIC_TO_UNICODE_PadSpace",
+16            "KANJI1_KanjiEBCDIC_TO_UNICODE",
+17            "KANJI1_KanjiEUC_TO_UNICODE",
+18            "KANJI1_KANJISJIS_TO_UNICODE",
+19            "KANJI1_SBC_TO_UNICODE",
+20            "KANJISJIS_TO_GRAPHIC",
+21            "KANJISJIS_TO_LATIN",
+22            "KANJISJIS_TO_UNICODE",
+23            "LATIN_TO_GRAPHIC",
+24            "LATIN_TO_KANJISJIS",
+25            "LATIN_TO_UNICODE",
+26            "LOCALE_TO_UNICODE",
+27            "UNICODE_TO_GRAPHIC",
+28            "UNICODE_TO_GRAPHIC_PadGraphic",
+29            "UNICODE_TO_GRAPHIC_VarGraphic",
+30            "UNICODE_TO_KANJI1_KanjiEBCDIC",
+31            "UNICODE_TO_KANJI1_KanjiEUC",
+32            "UNICODE_TO_KANJI1_KANJISJIS",
+33            "UNICODE_TO_KANJI1_SBC",
+34            "UNICODE_TO_KANJISJIS",
+35            "UNICODE_TO_LATIN",
+36            "UNICODE_TO_LOCALE",
+37            "UNICODE_TO_UNICODE_FoldSpace",
+38            "UNICODE_TO_UNICODE_Fullwidth",
+39            "UNICODE_TO_UNICODE_Halfwidth",
+40            "UNICODE_TO_UNICODE_NFC",
+41            "UNICODE_TO_UNICODE_NFD",
+42            "UNICODE_TO_UNICODE_NFKC",
+43            "UNICODE_TO_UNICODE_NFKD",
+44        }
+45
+46        FUNCTION_PARSERS = {
+47            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+48            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
+49        }
+50
+51        def _parse_translate(self, strict: bool) -> exp.Expression:
+52            this = self._parse_conjunction()
+53
+54            if not self._match(TokenType.USING):
+55                self.raise_error("Expected USING in TRANSLATE")
+56
+57            if self._match_texts(self.CHARSET_TRANSLATORS):
+58                charset_split = self._prev.text.split("_TO_")
+59                to = self.expression(exp.CharacterSet, this=charset_split[1])
+60            else:
+61                self.raise_error("Expected a character set translator after USING in TRANSLATE")
+62
+63            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+64
+65        # FROM before SET in Teradata UPDATE syntax
+66        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+67        def _parse_update(self) -> exp.Expression:
+68            return self.expression(
+69                exp.Update,
+70                **{  # type: ignore
+71                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
+72                    "from": self._parse_from(),
+73                    "expressions": self._match(TokenType.SET)
+74                    and self._parse_csv(self._parse_equality),
+75                    "where": self._parse_where(),
+76                },
+77            )
+78
+79    class Generator(generator.Generator):
+80        PROPERTIES_LOCATION = {
+81            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+82            exp.PartitionedByProperty: exp.Properties.Location.POST_INDEX,
+83        }
+84
+85        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
+86            return f"PARTITION BY {self.sql(expression, 'this')}"
+87
+88        # FROM before SET in Teradata UPDATE syntax
+89        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+90        def update_sql(self, expression: exp.Update) -> str:
+91            this = self.sql(expression, "this")
+92            from_sql = self.sql(expression, "from")
+93            set_sql = self.expressions(expression, flat=True)
+94            where_sql = self.sql(expression, "where")
+95            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
+96            return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ + Teradata() + + +
+ + + + +
+ +
+
+ +
+ + class + Teradata.Parser(sqlglot.parser.Parser): + + + +
+ +
10    class Parser(parser.Parser):
+11        CHARSET_TRANSLATORS = {
+12            "GRAPHIC_TO_KANJISJIS",
+13            "GRAPHIC_TO_LATIN",
+14            "GRAPHIC_TO_UNICODE",
+15            "GRAPHIC_TO_UNICODE_PadSpace",
+16            "KANJI1_KanjiEBCDIC_TO_UNICODE",
+17            "KANJI1_KanjiEUC_TO_UNICODE",
+18            "KANJI1_KANJISJIS_TO_UNICODE",
+19            "KANJI1_SBC_TO_UNICODE",
+20            "KANJISJIS_TO_GRAPHIC",
+21            "KANJISJIS_TO_LATIN",
+22            "KANJISJIS_TO_UNICODE",
+23            "LATIN_TO_GRAPHIC",
+24            "LATIN_TO_KANJISJIS",
+25            "LATIN_TO_UNICODE",
+26            "LOCALE_TO_UNICODE",
+27            "UNICODE_TO_GRAPHIC",
+28            "UNICODE_TO_GRAPHIC_PadGraphic",
+29            "UNICODE_TO_GRAPHIC_VarGraphic",
+30            "UNICODE_TO_KANJI1_KanjiEBCDIC",
+31            "UNICODE_TO_KANJI1_KanjiEUC",
+32            "UNICODE_TO_KANJI1_KANJISJIS",
+33            "UNICODE_TO_KANJI1_SBC",
+34            "UNICODE_TO_KANJISJIS",
+35            "UNICODE_TO_LATIN",
+36            "UNICODE_TO_LOCALE",
+37            "UNICODE_TO_UNICODE_FoldSpace",
+38            "UNICODE_TO_UNICODE_Fullwidth",
+39            "UNICODE_TO_UNICODE_Halfwidth",
+40            "UNICODE_TO_UNICODE_NFC",
+41            "UNICODE_TO_UNICODE_NFD",
+42            "UNICODE_TO_UNICODE_NFKC",
+43            "UNICODE_TO_UNICODE_NFKD",
+44        }
+45
+46        FUNCTION_PARSERS = {
+47            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
+48            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
+49        }
+50
+51        def _parse_translate(self, strict: bool) -> exp.Expression:
+52            this = self._parse_conjunction()
+53
+54            if not self._match(TokenType.USING):
+55                self.raise_error("Expected USING in TRANSLATE")
+56
+57            if self._match_texts(self.CHARSET_TRANSLATORS):
+58                charset_split = self._prev.text.split("_TO_")
+59                to = self.expression(exp.CharacterSet, this=charset_split[1])
+60            else:
+61                self.raise_error("Expected a character set translator after USING in TRANSLATE")
+62
+63            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+64
+65        # FROM before SET in Teradata UPDATE syntax
+66        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+67        def _parse_update(self) -> exp.Expression:
+68            return self.expression(
+69                exp.Update,
+70                **{  # type: ignore
+71                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
+72                    "from": self._parse_from(),
+73                    "expressions": self._match(TokenType.SET)
+74                    and self._parse_csv(self._parse_equality),
+75                    "where": self._parse_where(),
+76                },
+77            )
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + Teradata.Generator(sqlglot.generator.Generator): + + + +
+ +
79    class Generator(generator.Generator):
+80        PROPERTIES_LOCATION = {
+81            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+82            exp.PartitionedByProperty: exp.Properties.Location.POST_INDEX,
+83        }
+84
+85        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
+86            return f"PARTITION BY {self.sql(expression, 'this')}"
+87
+88        # FROM before SET in Teradata UPDATE syntax
+89        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
+90        def update_sql(self, expression: exp.Update) -> str:
+91            this = self.sql(expression, "this")
+92            from_sql = self.sql(expression, "from")
+93            set_sql = self.expressions(expression, flat=True)
+94            where_sql = self.sql(expression, "where")
+95            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
+96            return self.prepend_ctes(expression, sql)
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + partitionedbyproperty_sql(self, expression: sqlglot.expressions.PartitionedByProperty) -> str: + + + +
+ +
85        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
+86            return f"PARTITION BY {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + update_sql(self, expression: sqlglot.expressions.Update) -> str: + + + +
+ +
90        def update_sql(self, expression: exp.Update) -> str:
+91            this = self.sql(expression, "this")
+92            from_sql = self.sql(expression, "from")
+93            set_sql = self.expressions(expression, flat=True)
+94            where_sql = self.sql(expression, "where")
+95            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
+96            return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/trino.html b/docs/sqlglot/dialects/trino.html new file mode 100644 index 0000000..2584557 --- /dev/null +++ b/docs/sqlglot/dialects/trino.html @@ -0,0 +1,653 @@ + + + + + + + sqlglot.dialects.trino API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.trino

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3from sqlglot import exp
+ 4from sqlglot.dialects.presto import Presto
+ 5
+ 6
+ 7class Trino(Presto):
+ 8    class Generator(Presto.Generator):
+ 9        TRANSFORMS = {
+10            **Presto.Generator.TRANSFORMS,  # type: ignore
+11            exp.ArraySum: lambda self, e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+12        }
+13
+14    class Tokenizer(Presto.Tokenizer):
+15        HEX_STRINGS = [("X'", "'")]
+
+ + +
+
+ +
+ + class + Trino(sqlglot.dialects.presto.Presto): + + + +
+ +
 8class Trino(Presto):
+ 9    class Generator(Presto.Generator):
+10        TRANSFORMS = {
+11            **Presto.Generator.TRANSFORMS,  # type: ignore
+12            exp.ArraySum: lambda self, e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+13        }
+14
+15    class Tokenizer(Presto.Tokenizer):
+16        HEX_STRINGS = [("X'", "'")]
+
+ + + + +
+
+ + Trino() + + +
+ + + + +
+ +
+
+ +
+ + class + Trino.Generator(sqlglot.dialects.presto.Presto.Generator): + + + +
+ +
 9    class Generator(Presto.Generator):
+10        TRANSFORMS = {
+11            **Presto.Generator.TRANSFORMS,  # type: ignore
+12            exp.ArraySum: lambda self, e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
+13        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will be lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+ +
+
+
+
+ +
+ + class + Trino.Tokenizer(sqlglot.dialects.presto.Presto.Tokenizer): + + + +
+ +
15    class Tokenizer(Presto.Tokenizer):
+16        HEX_STRINGS = [("X'", "'")]
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/dialects/tsql.html b/docs/sqlglot/dialects/tsql.html new file mode 100644 index 0000000..40e29e6 --- /dev/null +++ b/docs/sqlglot/dialects/tsql.html @@ -0,0 +1,1772 @@ + + + + + + + sqlglot.dialects.tsql API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.dialects.tsql

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import re
+  4import typing as t
+  5
+  6from sqlglot import exp, generator, parser, tokens
+  7from sqlglot.dialects.dialect import Dialect, parse_date_delta, rename_func
+  8from sqlglot.expressions import DataType
+  9from sqlglot.helper import seq_get
+ 10from sqlglot.time import format_time
+ 11from sqlglot.tokens import TokenType
+ 12
+ 13FULL_FORMAT_TIME_MAPPING = {
+ 14    "weekday": "%A",
+ 15    "dw": "%A",
+ 16    "w": "%A",
+ 17    "month": "%B",
+ 18    "mm": "%B",
+ 19    "m": "%B",
+ 20}
+ 21
+ 22DATE_DELTA_INTERVAL = {
+ 23    "year": "year",
+ 24    "yyyy": "year",
+ 25    "yy": "year",
+ 26    "quarter": "quarter",
+ 27    "qq": "quarter",
+ 28    "q": "quarter",
+ 29    "month": "month",
+ 30    "mm": "month",
+ 31    "m": "month",
+ 32    "week": "week",
+ 33    "ww": "week",
+ 34    "wk": "week",
+ 35    "day": "day",
+ 36    "dd": "day",
+ 37    "d": "day",
+ 38}
+ 39
+ 40
+ 41DATE_FMT_RE = re.compile("([dD]{1,2})|([mM]{1,2})|([yY]{1,4})|([hH]{1,2})|([sS]{1,2})")
+ 42
+ 43# N = Numeric, C=Currency
+ 44TRANSPILE_SAFE_NUMBER_FMT = {"N", "C"}
+ 45
+ 46
+ 47def _format_time_lambda(exp_class, full_format_mapping=None, default=None):
+ 48    def _format_time(args):
+ 49        return exp_class(
+ 50            this=seq_get(args, 1),
+ 51            format=exp.Literal.string(
+ 52                format_time(
+ 53                    seq_get(args, 0).name or (TSQL.time_format if default is True else default),
+ 54                    {**TSQL.time_mapping, **FULL_FORMAT_TIME_MAPPING}
+ 55                    if full_format_mapping
+ 56                    else TSQL.time_mapping,
+ 57                )
+ 58            ),
+ 59        )
+ 60
+ 61    return _format_time
+ 62
+ 63
+ 64def _parse_format(args):
+ 65    fmt = seq_get(args, 1)
+ 66    number_fmt = fmt.name in TRANSPILE_SAFE_NUMBER_FMT or not DATE_FMT_RE.search(fmt.this)
+ 67    if number_fmt:
+ 68        return exp.NumberToStr(this=seq_get(args, 0), format=fmt)
+ 69    return exp.TimeToStr(
+ 70        this=seq_get(args, 0),
+ 71        format=exp.Literal.string(
+ 72            format_time(fmt.name, TSQL.format_time_mapping)
+ 73            if len(fmt.name) == 1
+ 74            else format_time(fmt.name, TSQL.time_mapping)
+ 75        ),
+ 76    )
+ 77
+ 78
+ 79def _parse_eomonth(args):
+ 80    date = seq_get(args, 0)
+ 81    month_lag = seq_get(args, 1)
+ 82    unit = DATE_DELTA_INTERVAL.get("month")
+ 83
+ 84    if month_lag is None:
+ 85        return exp.LastDateOfMonth(this=date)
+ 86
+ 87    # Remove month lag argument in parser as its compared with the number of arguments of the resulting class
+ 88    args.remove(month_lag)
+ 89
+ 90    return exp.LastDateOfMonth(this=exp.DateAdd(this=date, expression=month_lag, unit=unit))
+ 91
+ 92
+ 93def generate_date_delta_with_unit_sql(self, e):
+ 94    func = "DATEADD" if isinstance(e, exp.DateAdd) else "DATEDIFF"
+ 95    return f"{func}({self.format_args(e.text('unit'), e.expression, e.this)})"
+ 96
+ 97
+ 98def _format_sql(self, e):
+ 99    fmt = (
+100        e.args["format"]
+101        if isinstance(e, exp.NumberToStr)
+102        else exp.Literal.string(format_time(e.text("format"), TSQL.inverse_time_mapping))
+103    )
+104    return f"FORMAT({self.format_args(e.this, fmt)})"
+105
+106
+107def _string_agg_sql(self, e):
+108    e = e.copy()
+109
+110    this = e.this
+111    distinct = e.find(exp.Distinct)
+112    if distinct:
+113        # exp.Distinct can appear below an exp.Order or an exp.GroupConcat expression
+114        self.unsupported("T-SQL STRING_AGG doesn't support DISTINCT.")
+115        this = distinct.expressions[0]
+116        distinct.pop()
+117
+118    order = ""
+119    if isinstance(e.this, exp.Order):
+120        if e.this.this:
+121            this = e.this.this
+122            e.this.this.pop()
+123        order = f" WITHIN GROUP ({self.sql(e.this)[1:]})"  # Order has a leading space
+124
+125    separator = e.args.get("separator") or exp.Literal.string(",")
+126    return f"STRING_AGG({self.format_args(this, separator)}){order}"
+127
+128
+129class TSQL(Dialect):
+130    null_ordering = "nulls_are_small"
+131    time_format = "'yyyy-mm-dd hh:mm:ss'"
+132
+133    time_mapping = {
+134        "year": "%Y",
+135        "qq": "%q",
+136        "q": "%q",
+137        "quarter": "%q",
+138        "dayofyear": "%j",
+139        "day": "%d",
+140        "dy": "%d",
+141        "y": "%Y",
+142        "week": "%W",
+143        "ww": "%W",
+144        "wk": "%W",
+145        "hour": "%h",
+146        "hh": "%I",
+147        "minute": "%M",
+148        "mi": "%M",
+149        "n": "%M",
+150        "second": "%S",
+151        "ss": "%S",
+152        "s": "%-S",
+153        "millisecond": "%f",
+154        "ms": "%f",
+155        "weekday": "%W",
+156        "dw": "%W",
+157        "month": "%m",
+158        "mm": "%M",
+159        "m": "%-M",
+160        "Y": "%Y",
+161        "YYYY": "%Y",
+162        "YY": "%y",
+163        "MMMM": "%B",
+164        "MMM": "%b",
+165        "MM": "%m",
+166        "M": "%-m",
+167        "dd": "%d",
+168        "d": "%-d",
+169        "HH": "%H",
+170        "H": "%-H",
+171        "h": "%-I",
+172        "S": "%f",
+173        "yyyy": "%Y",
+174        "yy": "%y",
+175    }
+176
+177    convert_format_mapping = {
+178        "0": "%b %d %Y %-I:%M%p",
+179        "1": "%m/%d/%y",
+180        "2": "%y.%m.%d",
+181        "3": "%d/%m/%y",
+182        "4": "%d.%m.%y",
+183        "5": "%d-%m-%y",
+184        "6": "%d %b %y",
+185        "7": "%b %d, %y",
+186        "8": "%H:%M:%S",
+187        "9": "%b %d %Y %-I:%M:%S:%f%p",
+188        "10": "mm-dd-yy",
+189        "11": "yy/mm/dd",
+190        "12": "yymmdd",
+191        "13": "%d %b %Y %H:%M:ss:%f",
+192        "14": "%H:%M:%S:%f",
+193        "20": "%Y-%m-%d %H:%M:%S",
+194        "21": "%Y-%m-%d %H:%M:%S.%f",
+195        "22": "%m/%d/%y %-I:%M:%S %p",
+196        "23": "%Y-%m-%d",
+197        "24": "%H:%M:%S",
+198        "25": "%Y-%m-%d %H:%M:%S.%f",
+199        "100": "%b %d %Y %-I:%M%p",
+200        "101": "%m/%d/%Y",
+201        "102": "%Y.%m.%d",
+202        "103": "%d/%m/%Y",
+203        "104": "%d.%m.%Y",
+204        "105": "%d-%m-%Y",
+205        "106": "%d %b %Y",
+206        "107": "%b %d, %Y",
+207        "108": "%H:%M:%S",
+208        "109": "%b %d %Y %-I:%M:%S:%f%p",
+209        "110": "%m-%d-%Y",
+210        "111": "%Y/%m/%d",
+211        "112": "%Y%m%d",
+212        "113": "%d %b %Y %H:%M:%S:%f",
+213        "114": "%H:%M:%S:%f",
+214        "120": "%Y-%m-%d %H:%M:%S",
+215        "121": "%Y-%m-%d %H:%M:%S.%f",
+216    }
+217    # not sure if complete
+218    format_time_mapping = {
+219        "y": "%B %Y",
+220        "d": "%m/%d/%Y",
+221        "H": "%-H",
+222        "h": "%-I",
+223        "s": "%Y-%m-%d %H:%M:%S",
+224        "D": "%A,%B,%Y",
+225        "f": "%A,%B,%Y %-I:%M %p",
+226        "F": "%A,%B,%Y %-I:%M:%S %p",
+227        "g": "%m/%d/%Y %-I:%M %p",
+228        "G": "%m/%d/%Y %-I:%M:%S %p",
+229        "M": "%B %-d",
+230        "m": "%B %-d",
+231        "O": "%Y-%m-%dT%H:%M:%S",
+232        "u": "%Y-%M-%D %H:%M:%S%z",
+233        "U": "%A, %B %D, %Y %H:%M:%S%z",
+234        "T": "%-I:%M:%S %p",
+235        "t": "%-I:%M",
+236        "Y": "%a %Y",
+237    }
+238
+239    class Tokenizer(tokens.Tokenizer):
+240        IDENTIFIERS = ['"', ("[", "]")]
+241
+242        QUOTES = ["'", '"']
+243
+244        KEYWORDS = {
+245            **tokens.Tokenizer.KEYWORDS,
+246            "BIT": TokenType.BOOLEAN,
+247            "DATETIME2": TokenType.DATETIME,
+248            "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
+249            "DECLARE": TokenType.COMMAND,
+250            "IMAGE": TokenType.IMAGE,
+251            "MONEY": TokenType.MONEY,
+252            "NTEXT": TokenType.TEXT,
+253            "NVARCHAR(MAX)": TokenType.TEXT,
+254            "PRINT": TokenType.COMMAND,
+255            "PROC": TokenType.PROCEDURE,
+256            "REAL": TokenType.FLOAT,
+257            "ROWVERSION": TokenType.ROWVERSION,
+258            "SMALLDATETIME": TokenType.DATETIME,
+259            "SMALLMONEY": TokenType.SMALLMONEY,
+260            "SQL_VARIANT": TokenType.VARIANT,
+261            "TIME": TokenType.TIMESTAMP,
+262            "TOP": TokenType.TOP,
+263            "UNIQUEIDENTIFIER": TokenType.UNIQUEIDENTIFIER,
+264            "VARCHAR(MAX)": TokenType.TEXT,
+265            "XML": TokenType.XML,
+266        }
+267
+268        # TSQL allows @, # to appear as a variable/identifier prefix
+269        SINGLE_TOKENS = tokens.Tokenizer.SINGLE_TOKENS.copy()
+270        SINGLE_TOKENS.pop("@")
+271        SINGLE_TOKENS.pop("#")
+272
+273    class Parser(parser.Parser):
+274        FUNCTIONS = {
+275            **parser.Parser.FUNCTIONS,  # type: ignore
+276            "CHARINDEX": lambda args: exp.StrPosition(
+277                this=seq_get(args, 1),
+278                substr=seq_get(args, 0),
+279                position=seq_get(args, 2),
+280            ),
+281            "ISNULL": exp.Coalesce.from_arg_list,
+282            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
+283            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
+284            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
+285            "DATEPART": _format_time_lambda(exp.TimeToStr),
+286            "GETDATE": exp.CurrentTimestamp.from_arg_list,
+287            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
+288            "IIF": exp.If.from_arg_list,
+289            "LEN": exp.Length.from_arg_list,
+290            "REPLICATE": exp.Repeat.from_arg_list,
+291            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
+292            "FORMAT": _parse_format,
+293            "EOMONTH": _parse_eomonth,
+294        }
+295
+296        VAR_LENGTH_DATATYPES = {
+297            DataType.Type.NVARCHAR,
+298            DataType.Type.VARCHAR,
+299            DataType.Type.CHAR,
+300            DataType.Type.NCHAR,
+301        }
+302
+303        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {  # type: ignore
+304            TokenType.TABLE,
+305            *parser.Parser.TYPE_TOKENS,  # type: ignore
+306        }
+307
+308        STATEMENT_PARSERS = {
+309            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+310            TokenType.END: lambda self: self._parse_command(),
+311        }
+312
+313        def _parse_system_time(self) -> t.Optional[exp.Expression]:
+314            if not self._match_text_seq("FOR", "SYSTEM_TIME"):
+315                return None
+316
+317            if self._match_text_seq("AS", "OF"):
+318                system_time = self.expression(
+319                    exp.SystemTime, this=self._parse_bitwise(), kind="AS OF"
+320                )
+321            elif self._match_set((TokenType.FROM, TokenType.BETWEEN)):
+322                kind = self._prev.text
+323                this = self._parse_bitwise()
+324                self._match_texts(("TO", "AND"))
+325                expression = self._parse_bitwise()
+326                system_time = self.expression(
+327                    exp.SystemTime, this=this, expression=expression, kind=kind
+328                )
+329            elif self._match_text_seq("CONTAINED", "IN"):
+330                args = self._parse_wrapped_csv(self._parse_bitwise)
+331                system_time = self.expression(
+332                    exp.SystemTime,
+333                    this=seq_get(args, 0),
+334                    expression=seq_get(args, 1),
+335                    kind="CONTAINED IN",
+336                )
+337            elif self._match(TokenType.ALL):
+338                system_time = self.expression(exp.SystemTime, kind="ALL")
+339            else:
+340                system_time = None
+341                self.raise_error("Unable to parse FOR SYSTEM_TIME clause")
+342
+343            return system_time
+344
+345        def _parse_table_parts(self, schema: bool = False) -> exp.Expression:
+346            table = super()._parse_table_parts(schema=schema)
+347            table.set("system_time", self._parse_system_time())
+348            return table
+349
+350        def _parse_returns(self) -> exp.Expression:
+351            table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)
+352            returns = super()._parse_returns()
+353            returns.set("table", table)
+354            return returns
+355
+356        def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
+357            to = self._parse_types()
+358            self._match(TokenType.COMMA)
+359            this = self._parse_conjunction()
+360
+361            if not to or not this:
+362                return None
+363
+364            # Retrieve length of datatype and override to default if not specified
+365            if seq_get(to.expressions, 0) is None and to.this in self.VAR_LENGTH_DATATYPES:
+366                to = exp.DataType.build(to.this, expressions=[exp.Literal.number(30)], nested=False)
+367
+368            # Check whether a conversion with format is applicable
+369            if self._match(TokenType.COMMA):
+370                format_val = self._parse_number()
+371                format_val_name = format_val.name if format_val else ""
+372
+373                if format_val_name not in TSQL.convert_format_mapping:
+374                    raise ValueError(
+375                        f"CONVERT function at T-SQL does not support format style {format_val_name}"
+376                    )
+377
+378                format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name])
+379
+380                # Check whether the convert entails a string to date format
+381                if to.this == DataType.Type.DATE:
+382                    return self.expression(exp.StrToDate, this=this, format=format_norm)
+383                # Check whether the convert entails a string to datetime format
+384                elif to.this == DataType.Type.DATETIME:
+385                    return self.expression(exp.StrToTime, this=this, format=format_norm)
+386                # Check whether the convert entails a date to string format
+387                elif to.this in self.VAR_LENGTH_DATATYPES:
+388                    return self.expression(
+389                        exp.Cast if strict else exp.TryCast,
+390                        to=to,
+391                        this=self.expression(exp.TimeToStr, this=this, format=format_norm),
+392                    )
+393                elif to.this == DataType.Type.TEXT:
+394                    return self.expression(exp.TimeToStr, this=this, format=format_norm)
+395
+396            # Entails a simple cast without any format requirement
+397            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+398
+399        def _parse_user_defined_function(
+400            self, kind: t.Optional[TokenType] = None
+401        ) -> t.Optional[exp.Expression]:
+402            this = super()._parse_user_defined_function(kind=kind)
+403
+404            if (
+405                kind == TokenType.FUNCTION
+406                or isinstance(this, exp.UserDefinedFunction)
+407                or self._match(TokenType.ALIAS, advance=False)
+408            ):
+409                return this
+410
+411            expressions = self._parse_csv(self._parse_udf_kwarg)
+412            return self.expression(exp.UserDefinedFunction, this=this, expressions=expressions)
+413
+414    class Generator(generator.Generator):
+415        LOCKING_READS_SUPPORTED = True
+416
+417        TYPE_MAPPING = {
+418            **generator.Generator.TYPE_MAPPING,  # type: ignore
+419            exp.DataType.Type.BOOLEAN: "BIT",
+420            exp.DataType.Type.INT: "INTEGER",
+421            exp.DataType.Type.DECIMAL: "NUMERIC",
+422            exp.DataType.Type.DATETIME: "DATETIME2",
+423            exp.DataType.Type.VARIANT: "SQL_VARIANT",
+424        }
+425
+426        TRANSFORMS = {
+427            **generator.Generator.TRANSFORMS,  # type: ignore
+428            exp.DateAdd: generate_date_delta_with_unit_sql,
+429            exp.DateDiff: generate_date_delta_with_unit_sql,
+430            exp.CurrentDate: rename_func("GETDATE"),
+431            exp.CurrentTimestamp: rename_func("GETDATE"),
+432            exp.If: rename_func("IIF"),
+433            exp.NumberToStr: _format_sql,
+434            exp.TimeToStr: _format_sql,
+435            exp.GroupConcat: _string_agg_sql,
+436        }
+437
+438        TRANSFORMS.pop(exp.ReturnsProperty)
+439
+440        def systemtime_sql(self, expression: exp.SystemTime) -> str:
+441            kind = expression.args["kind"]
+442            if kind == "ALL":
+443                return "FOR SYSTEM_TIME ALL"
+444
+445            start = self.sql(expression, "this")
+446            if kind == "AS OF":
+447                return f"FOR SYSTEM_TIME AS OF {start}"
+448
+449            end = self.sql(expression, "expression")
+450            if kind == "FROM":
+451                return f"FOR SYSTEM_TIME FROM {start} TO {end}"
+452            if kind == "BETWEEN":
+453                return f"FOR SYSTEM_TIME BETWEEN {start} AND {end}"
+454
+455            return f"FOR SYSTEM_TIME CONTAINED IN ({start}, {end})"
+456
+457        def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:
+458            table = expression.args.get("table")
+459            table = f"{table} " if table else ""
+460            return f"RETURNS {table}{self.sql(expression, 'this')}"
+
+ + +
+
+ +
+ + def + generate_date_delta_with_unit_sql(self, e): + + + +
+ +
94def generate_date_delta_with_unit_sql(self, e):
+95    func = "DATEADD" if isinstance(e, exp.DateAdd) else "DATEDIFF"
+96    return f"{func}({self.format_args(e.text('unit'), e.expression, e.this)})"
+
+ + + + +
+
+ +
+ + class + TSQL(sqlglot.dialects.dialect.Dialect): + + + +
+ +
130class TSQL(Dialect):
+131    null_ordering = "nulls_are_small"
+132    time_format = "'yyyy-mm-dd hh:mm:ss'"
+133
+134    time_mapping = {
+135        "year": "%Y",
+136        "qq": "%q",
+137        "q": "%q",
+138        "quarter": "%q",
+139        "dayofyear": "%j",
+140        "day": "%d",
+141        "dy": "%d",
+142        "y": "%Y",
+143        "week": "%W",
+144        "ww": "%W",
+145        "wk": "%W",
+146        "hour": "%h",
+147        "hh": "%I",
+148        "minute": "%M",
+149        "mi": "%M",
+150        "n": "%M",
+151        "second": "%S",
+152        "ss": "%S",
+153        "s": "%-S",
+154        "millisecond": "%f",
+155        "ms": "%f",
+156        "weekday": "%W",
+157        "dw": "%W",
+158        "month": "%m",
+159        "mm": "%M",
+160        "m": "%-M",
+161        "Y": "%Y",
+162        "YYYY": "%Y",
+163        "YY": "%y",
+164        "MMMM": "%B",
+165        "MMM": "%b",
+166        "MM": "%m",
+167        "M": "%-m",
+168        "dd": "%d",
+169        "d": "%-d",
+170        "HH": "%H",
+171        "H": "%-H",
+172        "h": "%-I",
+173        "S": "%f",
+174        "yyyy": "%Y",
+175        "yy": "%y",
+176    }
+177
+178    convert_format_mapping = {
+179        "0": "%b %d %Y %-I:%M%p",
+180        "1": "%m/%d/%y",
+181        "2": "%y.%m.%d",
+182        "3": "%d/%m/%y",
+183        "4": "%d.%m.%y",
+184        "5": "%d-%m-%y",
+185        "6": "%d %b %y",
+186        "7": "%b %d, %y",
+187        "8": "%H:%M:%S",
+188        "9": "%b %d %Y %-I:%M:%S:%f%p",
+189        "10": "mm-dd-yy",
+190        "11": "yy/mm/dd",
+191        "12": "yymmdd",
+192        "13": "%d %b %Y %H:%M:ss:%f",
+193        "14": "%H:%M:%S:%f",
+194        "20": "%Y-%m-%d %H:%M:%S",
+195        "21": "%Y-%m-%d %H:%M:%S.%f",
+196        "22": "%m/%d/%y %-I:%M:%S %p",
+197        "23": "%Y-%m-%d",
+198        "24": "%H:%M:%S",
+199        "25": "%Y-%m-%d %H:%M:%S.%f",
+200        "100": "%b %d %Y %-I:%M%p",
+201        "101": "%m/%d/%Y",
+202        "102": "%Y.%m.%d",
+203        "103": "%d/%m/%Y",
+204        "104": "%d.%m.%Y",
+205        "105": "%d-%m-%Y",
+206        "106": "%d %b %Y",
+207        "107": "%b %d, %Y",
+208        "108": "%H:%M:%S",
+209        "109": "%b %d %Y %-I:%M:%S:%f%p",
+210        "110": "%m-%d-%Y",
+211        "111": "%Y/%m/%d",
+212        "112": "%Y%m%d",
+213        "113": "%d %b %Y %H:%M:%S:%f",
+214        "114": "%H:%M:%S:%f",
+215        "120": "%Y-%m-%d %H:%M:%S",
+216        "121": "%Y-%m-%d %H:%M:%S.%f",
+217    }
+218    # not sure if complete
+219    format_time_mapping = {
+220        "y": "%B %Y",
+221        "d": "%m/%d/%Y",
+222        "H": "%-H",
+223        "h": "%-I",
+224        "s": "%Y-%m-%d %H:%M:%S",
+225        "D": "%A,%B,%Y",
+226        "f": "%A,%B,%Y %-I:%M %p",
+227        "F": "%A,%B,%Y %-I:%M:%S %p",
+228        "g": "%m/%d/%Y %-I:%M %p",
+229        "G": "%m/%d/%Y %-I:%M:%S %p",
+230        "M": "%B %-d",
+231        "m": "%B %-d",
+232        "O": "%Y-%m-%dT%H:%M:%S",
+233        "u": "%Y-%M-%D %H:%M:%S%z",
+234        "U": "%A, %B %D, %Y %H:%M:%S%z",
+235        "T": "%-I:%M:%S %p",
+236        "t": "%-I:%M",
+237        "Y": "%a %Y",
+238    }
+239
+240    class Tokenizer(tokens.Tokenizer):
+241        IDENTIFIERS = ['"', ("[", "]")]
+242
+243        QUOTES = ["'", '"']
+244
+245        KEYWORDS = {
+246            **tokens.Tokenizer.KEYWORDS,
+247            "BIT": TokenType.BOOLEAN,
+248            "DATETIME2": TokenType.DATETIME,
+249            "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
+250            "DECLARE": TokenType.COMMAND,
+251            "IMAGE": TokenType.IMAGE,
+252            "MONEY": TokenType.MONEY,
+253            "NTEXT": TokenType.TEXT,
+254            "NVARCHAR(MAX)": TokenType.TEXT,
+255            "PRINT": TokenType.COMMAND,
+256            "PROC": TokenType.PROCEDURE,
+257            "REAL": TokenType.FLOAT,
+258            "ROWVERSION": TokenType.ROWVERSION,
+259            "SMALLDATETIME": TokenType.DATETIME,
+260            "SMALLMONEY": TokenType.SMALLMONEY,
+261            "SQL_VARIANT": TokenType.VARIANT,
+262            "TIME": TokenType.TIMESTAMP,
+263            "TOP": TokenType.TOP,
+264            "UNIQUEIDENTIFIER": TokenType.UNIQUEIDENTIFIER,
+265            "VARCHAR(MAX)": TokenType.TEXT,
+266            "XML": TokenType.XML,
+267        }
+268
+269        # TSQL allows @, # to appear as a variable/identifier prefix
+270        SINGLE_TOKENS = tokens.Tokenizer.SINGLE_TOKENS.copy()
+271        SINGLE_TOKENS.pop("@")
+272        SINGLE_TOKENS.pop("#")
+273
+274    class Parser(parser.Parser):
+275        FUNCTIONS = {
+276            **parser.Parser.FUNCTIONS,  # type: ignore
+277            "CHARINDEX": lambda args: exp.StrPosition(
+278                this=seq_get(args, 1),
+279                substr=seq_get(args, 0),
+280                position=seq_get(args, 2),
+281            ),
+282            "ISNULL": exp.Coalesce.from_arg_list,
+283            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
+284            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
+285            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
+286            "DATEPART": _format_time_lambda(exp.TimeToStr),
+287            "GETDATE": exp.CurrentTimestamp.from_arg_list,
+288            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
+289            "IIF": exp.If.from_arg_list,
+290            "LEN": exp.Length.from_arg_list,
+291            "REPLICATE": exp.Repeat.from_arg_list,
+292            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
+293            "FORMAT": _parse_format,
+294            "EOMONTH": _parse_eomonth,
+295        }
+296
+297        VAR_LENGTH_DATATYPES = {
+298            DataType.Type.NVARCHAR,
+299            DataType.Type.VARCHAR,
+300            DataType.Type.CHAR,
+301            DataType.Type.NCHAR,
+302        }
+303
+304        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {  # type: ignore
+305            TokenType.TABLE,
+306            *parser.Parser.TYPE_TOKENS,  # type: ignore
+307        }
+308
+309        STATEMENT_PARSERS = {
+310            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+311            TokenType.END: lambda self: self._parse_command(),
+312        }
+313
+314        def _parse_system_time(self) -> t.Optional[exp.Expression]:
+315            if not self._match_text_seq("FOR", "SYSTEM_TIME"):
+316                return None
+317
+318            if self._match_text_seq("AS", "OF"):
+319                system_time = self.expression(
+320                    exp.SystemTime, this=self._parse_bitwise(), kind="AS OF"
+321                )
+322            elif self._match_set((TokenType.FROM, TokenType.BETWEEN)):
+323                kind = self._prev.text
+324                this = self._parse_bitwise()
+325                self._match_texts(("TO", "AND"))
+326                expression = self._parse_bitwise()
+327                system_time = self.expression(
+328                    exp.SystemTime, this=this, expression=expression, kind=kind
+329                )
+330            elif self._match_text_seq("CONTAINED", "IN"):
+331                args = self._parse_wrapped_csv(self._parse_bitwise)
+332                system_time = self.expression(
+333                    exp.SystemTime,
+334                    this=seq_get(args, 0),
+335                    expression=seq_get(args, 1),
+336                    kind="CONTAINED IN",
+337                )
+338            elif self._match(TokenType.ALL):
+339                system_time = self.expression(exp.SystemTime, kind="ALL")
+340            else:
+341                system_time = None
+342                self.raise_error("Unable to parse FOR SYSTEM_TIME clause")
+343
+344            return system_time
+345
+346        def _parse_table_parts(self, schema: bool = False) -> exp.Expression:
+347            table = super()._parse_table_parts(schema=schema)
+348            table.set("system_time", self._parse_system_time())
+349            return table
+350
+351        def _parse_returns(self) -> exp.Expression:
+352            table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)
+353            returns = super()._parse_returns()
+354            returns.set("table", table)
+355            return returns
+356
+357        def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
+358            to = self._parse_types()
+359            self._match(TokenType.COMMA)
+360            this = self._parse_conjunction()
+361
+362            if not to or not this:
+363                return None
+364
+365            # Retrieve length of datatype and override to default if not specified
+366            if seq_get(to.expressions, 0) is None and to.this in self.VAR_LENGTH_DATATYPES:
+367                to = exp.DataType.build(to.this, expressions=[exp.Literal.number(30)], nested=False)
+368
+369            # Check whether a conversion with format is applicable
+370            if self._match(TokenType.COMMA):
+371                format_val = self._parse_number()
+372                format_val_name = format_val.name if format_val else ""
+373
+374                if format_val_name not in TSQL.convert_format_mapping:
+375                    raise ValueError(
+376                        f"CONVERT function at T-SQL does not support format style {format_val_name}"
+377                    )
+378
+379                format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name])
+380
+381                # Check whether the convert entails a string to date format
+382                if to.this == DataType.Type.DATE:
+383                    return self.expression(exp.StrToDate, this=this, format=format_norm)
+384                # Check whether the convert entails a string to datetime format
+385                elif to.this == DataType.Type.DATETIME:
+386                    return self.expression(exp.StrToTime, this=this, format=format_norm)
+387                # Check whether the convert entails a date to string format
+388                elif to.this in self.VAR_LENGTH_DATATYPES:
+389                    return self.expression(
+390                        exp.Cast if strict else exp.TryCast,
+391                        to=to,
+392                        this=self.expression(exp.TimeToStr, this=this, format=format_norm),
+393                    )
+394                elif to.this == DataType.Type.TEXT:
+395                    return self.expression(exp.TimeToStr, this=this, format=format_norm)
+396
+397            # Entails a simple cast without any format requirement
+398            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+399
+400        def _parse_user_defined_function(
+401            self, kind: t.Optional[TokenType] = None
+402        ) -> t.Optional[exp.Expression]:
+403            this = super()._parse_user_defined_function(kind=kind)
+404
+405            if (
+406                kind == TokenType.FUNCTION
+407                or isinstance(this, exp.UserDefinedFunction)
+408                or self._match(TokenType.ALIAS, advance=False)
+409            ):
+410                return this
+411
+412            expressions = self._parse_csv(self._parse_udf_kwarg)
+413            return self.expression(exp.UserDefinedFunction, this=this, expressions=expressions)
+414
+415    class Generator(generator.Generator):
+416        LOCKING_READS_SUPPORTED = True
+417
+418        TYPE_MAPPING = {
+419            **generator.Generator.TYPE_MAPPING,  # type: ignore
+420            exp.DataType.Type.BOOLEAN: "BIT",
+421            exp.DataType.Type.INT: "INTEGER",
+422            exp.DataType.Type.DECIMAL: "NUMERIC",
+423            exp.DataType.Type.DATETIME: "DATETIME2",
+424            exp.DataType.Type.VARIANT: "SQL_VARIANT",
+425        }
+426
+427        TRANSFORMS = {
+428            **generator.Generator.TRANSFORMS,  # type: ignore
+429            exp.DateAdd: generate_date_delta_with_unit_sql,
+430            exp.DateDiff: generate_date_delta_with_unit_sql,
+431            exp.CurrentDate: rename_func("GETDATE"),
+432            exp.CurrentTimestamp: rename_func("GETDATE"),
+433            exp.If: rename_func("IIF"),
+434            exp.NumberToStr: _format_sql,
+435            exp.TimeToStr: _format_sql,
+436            exp.GroupConcat: _string_agg_sql,
+437        }
+438
+439        TRANSFORMS.pop(exp.ReturnsProperty)
+440
+441        def systemtime_sql(self, expression: exp.SystemTime) -> str:
+442            kind = expression.args["kind"]
+443            if kind == "ALL":
+444                return "FOR SYSTEM_TIME ALL"
+445
+446            start = self.sql(expression, "this")
+447            if kind == "AS OF":
+448                return f"FOR SYSTEM_TIME AS OF {start}"
+449
+450            end = self.sql(expression, "expression")
+451            if kind == "FROM":
+452                return f"FOR SYSTEM_TIME FROM {start} TO {end}"
+453            if kind == "BETWEEN":
+454                return f"FOR SYSTEM_TIME BETWEEN {start} AND {end}"
+455
+456            return f"FOR SYSTEM_TIME CONTAINED IN ({start}, {end})"
+457
+458        def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:
+459            table = expression.args.get("table")
+460            table = f"{table} " if table else ""
+461            return f"RETURNS {table}{self.sql(expression, 'this')}"
+
+ + + + +
+
+ + TSQL() + + +
+ + + + +
+ +
+
+ +
+ + class + TSQL.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
240    class Tokenizer(tokens.Tokenizer):
+241        IDENTIFIERS = ['"', ("[", "]")]
+242
+243        QUOTES = ["'", '"']
+244
+245        KEYWORDS = {
+246            **tokens.Tokenizer.KEYWORDS,
+247            "BIT": TokenType.BOOLEAN,
+248            "DATETIME2": TokenType.DATETIME,
+249            "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
+250            "DECLARE": TokenType.COMMAND,
+251            "IMAGE": TokenType.IMAGE,
+252            "MONEY": TokenType.MONEY,
+253            "NTEXT": TokenType.TEXT,
+254            "NVARCHAR(MAX)": TokenType.TEXT,
+255            "PRINT": TokenType.COMMAND,
+256            "PROC": TokenType.PROCEDURE,
+257            "REAL": TokenType.FLOAT,
+258            "ROWVERSION": TokenType.ROWVERSION,
+259            "SMALLDATETIME": TokenType.DATETIME,
+260            "SMALLMONEY": TokenType.SMALLMONEY,
+261            "SQL_VARIANT": TokenType.VARIANT,
+262            "TIME": TokenType.TIMESTAMP,
+263            "TOP": TokenType.TOP,
+264            "UNIQUEIDENTIFIER": TokenType.UNIQUEIDENTIFIER,
+265            "VARCHAR(MAX)": TokenType.TEXT,
+266            "XML": TokenType.XML,
+267        }
+268
+269        # TSQL allows @, # to appear as a variable/identifier prefix
+270        SINGLE_TOKENS = tokens.Tokenizer.SINGLE_TOKENS.copy()
+271        SINGLE_TOKENS.pop("@")
+272        SINGLE_TOKENS.pop("#")
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + TSQL.Parser(sqlglot.parser.Parser): + + + +
+ +
274    class Parser(parser.Parser):
+275        FUNCTIONS = {
+276            **parser.Parser.FUNCTIONS,  # type: ignore
+277            "CHARINDEX": lambda args: exp.StrPosition(
+278                this=seq_get(args, 1),
+279                substr=seq_get(args, 0),
+280                position=seq_get(args, 2),
+281            ),
+282            "ISNULL": exp.Coalesce.from_arg_list,
+283            "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
+284            "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
+285            "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
+286            "DATEPART": _format_time_lambda(exp.TimeToStr),
+287            "GETDATE": exp.CurrentTimestamp.from_arg_list,
+288            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
+289            "IIF": exp.If.from_arg_list,
+290            "LEN": exp.Length.from_arg_list,
+291            "REPLICATE": exp.Repeat.from_arg_list,
+292            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
+293            "FORMAT": _parse_format,
+294            "EOMONTH": _parse_eomonth,
+295        }
+296
+297        VAR_LENGTH_DATATYPES = {
+298            DataType.Type.NVARCHAR,
+299            DataType.Type.VARCHAR,
+300            DataType.Type.CHAR,
+301            DataType.Type.NCHAR,
+302        }
+303
+304        RETURNS_TABLE_TOKENS = parser.Parser.ID_VAR_TOKENS - {  # type: ignore
+305            TokenType.TABLE,
+306            *parser.Parser.TYPE_TOKENS,  # type: ignore
+307        }
+308
+309        STATEMENT_PARSERS = {
+310            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
+311            TokenType.END: lambda self: self._parse_command(),
+312        }
+313
+314        def _parse_system_time(self) -> t.Optional[exp.Expression]:
+315            if not self._match_text_seq("FOR", "SYSTEM_TIME"):
+316                return None
+317
+318            if self._match_text_seq("AS", "OF"):
+319                system_time = self.expression(
+320                    exp.SystemTime, this=self._parse_bitwise(), kind="AS OF"
+321                )
+322            elif self._match_set((TokenType.FROM, TokenType.BETWEEN)):
+323                kind = self._prev.text
+324                this = self._parse_bitwise()
+325                self._match_texts(("TO", "AND"))
+326                expression = self._parse_bitwise()
+327                system_time = self.expression(
+328                    exp.SystemTime, this=this, expression=expression, kind=kind
+329                )
+330            elif self._match_text_seq("CONTAINED", "IN"):
+331                args = self._parse_wrapped_csv(self._parse_bitwise)
+332                system_time = self.expression(
+333                    exp.SystemTime,
+334                    this=seq_get(args, 0),
+335                    expression=seq_get(args, 1),
+336                    kind="CONTAINED IN",
+337                )
+338            elif self._match(TokenType.ALL):
+339                system_time = self.expression(exp.SystemTime, kind="ALL")
+340            else:
+341                system_time = None
+342                self.raise_error("Unable to parse FOR SYSTEM_TIME clause")
+343
+344            return system_time
+345
+346        def _parse_table_parts(self, schema: bool = False) -> exp.Expression:
+347            table = super()._parse_table_parts(schema=schema)
+348            table.set("system_time", self._parse_system_time())
+349            return table
+350
+351        def _parse_returns(self) -> exp.Expression:
+352            table = self._parse_id_var(any_token=False, tokens=self.RETURNS_TABLE_TOKENS)
+353            returns = super()._parse_returns()
+354            returns.set("table", table)
+355            return returns
+356
+357        def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
+358            to = self._parse_types()
+359            self._match(TokenType.COMMA)
+360            this = self._parse_conjunction()
+361
+362            if not to or not this:
+363                return None
+364
+365            # Retrieve length of datatype and override to default if not specified
+366            if seq_get(to.expressions, 0) is None and to.this in self.VAR_LENGTH_DATATYPES:
+367                to = exp.DataType.build(to.this, expressions=[exp.Literal.number(30)], nested=False)
+368
+369            # Check whether a conversion with format is applicable
+370            if self._match(TokenType.COMMA):
+371                format_val = self._parse_number()
+372                format_val_name = format_val.name if format_val else ""
+373
+374                if format_val_name not in TSQL.convert_format_mapping:
+375                    raise ValueError(
+376                        f"CONVERT function at T-SQL does not support format style {format_val_name}"
+377                    )
+378
+379                format_norm = exp.Literal.string(TSQL.convert_format_mapping[format_val_name])
+380
+381                # Check whether the convert entails a string to date format
+382                if to.this == DataType.Type.DATE:
+383                    return self.expression(exp.StrToDate, this=this, format=format_norm)
+384                # Check whether the convert entails a string to datetime format
+385                elif to.this == DataType.Type.DATETIME:
+386                    return self.expression(exp.StrToTime, this=this, format=format_norm)
+387                # Check whether the convert entails a date to string format
+388                elif to.this in self.VAR_LENGTH_DATATYPES:
+389                    return self.expression(
+390                        exp.Cast if strict else exp.TryCast,
+391                        to=to,
+392                        this=self.expression(exp.TimeToStr, this=this, format=format_norm),
+393                    )
+394                elif to.this == DataType.Type.TEXT:
+395                    return self.expression(exp.TimeToStr, this=this, format=format_norm)
+396
+397            # Entails a simple cast without any format requirement
+398            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+399
+400        def _parse_user_defined_function(
+401            self, kind: t.Optional[TokenType] = None
+402        ) -> t.Optional[exp.Expression]:
+403            this = super()._parse_user_defined_function(kind=kind)
+404
+405            if (
+406                kind == TokenType.FUNCTION
+407                or isinstance(this, exp.UserDefinedFunction)
+408                or self._match(TokenType.ALIAS, advance=False)
+409            ):
+410                return this
+411
+412            expressions = self._parse_csv(self._parse_udf_kwarg)
+413            return self.expression(exp.UserDefinedFunction, this=this, expressions=expressions)
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.RAISE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + + +
+
+ +
+ + class + TSQL.Generator(sqlglot.generator.Generator): + + + +
+ +
415    class Generator(generator.Generator):
+416        LOCKING_READS_SUPPORTED = True
+417
+418        TYPE_MAPPING = {
+419            **generator.Generator.TYPE_MAPPING,  # type: ignore
+420            exp.DataType.Type.BOOLEAN: "BIT",
+421            exp.DataType.Type.INT: "INTEGER",
+422            exp.DataType.Type.DECIMAL: "NUMERIC",
+423            exp.DataType.Type.DATETIME: "DATETIME2",
+424            exp.DataType.Type.VARIANT: "SQL_VARIANT",
+425        }
+426
+427        TRANSFORMS = {
+428            **generator.Generator.TRANSFORMS,  # type: ignore
+429            exp.DateAdd: generate_date_delta_with_unit_sql,
+430            exp.DateDiff: generate_date_delta_with_unit_sql,
+431            exp.CurrentDate: rename_func("GETDATE"),
+432            exp.CurrentTimestamp: rename_func("GETDATE"),
+433            exp.If: rename_func("IIF"),
+434            exp.NumberToStr: _format_sql,
+435            exp.TimeToStr: _format_sql,
+436            exp.GroupConcat: _string_agg_sql,
+437        }
+438
+439        TRANSFORMS.pop(exp.ReturnsProperty)
+440
+441        def systemtime_sql(self, expression: exp.SystemTime) -> str:
+442            kind = expression.args["kind"]
+443            if kind == "ALL":
+444                return "FOR SYSTEM_TIME ALL"
+445
+446            start = self.sql(expression, "this")
+447            if kind == "AS OF":
+448                return f"FOR SYSTEM_TIME AS OF {start}"
+449
+450            end = self.sql(expression, "expression")
+451            if kind == "FROM":
+452                return f"FOR SYSTEM_TIME FROM {start} TO {end}"
+453            if kind == "BETWEEN":
+454                return f"FOR SYSTEM_TIME BETWEEN {start} AND {end}"
+455
+456            return f"FOR SYSTEM_TIME CONTAINED IN ({start}, {end})"
+457
+458        def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:
+459            table = expression.args.get("table")
+460            table = f"{table} " if table else ""
+461            return f"RETURNS {table}{self.sql(expression, 'this')}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + def + systemtime_sql(self, expression: sqlglot.expressions.SystemTime) -> str: + + + +
+ +
441        def systemtime_sql(self, expression: exp.SystemTime) -> str:
+442            kind = expression.args["kind"]
+443            if kind == "ALL":
+444                return "FOR SYSTEM_TIME ALL"
+445
+446            start = self.sql(expression, "this")
+447            if kind == "AS OF":
+448                return f"FOR SYSTEM_TIME AS OF {start}"
+449
+450            end = self.sql(expression, "expression")
+451            if kind == "FROM":
+452                return f"FOR SYSTEM_TIME FROM {start} TO {end}"
+453            if kind == "BETWEEN":
+454                return f"FOR SYSTEM_TIME BETWEEN {start} AND {end}"
+455
+456            return f"FOR SYSTEM_TIME CONTAINED IN ({start}, {end})"
+
+ + + + +
+
+ +
+ + def + returnsproperty_sql(self, expression: sqlglot.expressions.ReturnsProperty) -> str: + + + +
+ +
458        def returnsproperty_sql(self, expression: exp.ReturnsProperty) -> str:
+459            table = expression.args.get("table")
+460            table = f"{table} " if table else ""
+461            return f"RETURNS {table}{self.sql(expression, 'this')}"
+
+ + + + +
+
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/diff.html b/docs/sqlglot/diff.html new file mode 100644 index 0000000..9c366b2 --- /dev/null +++ b/docs/sqlglot/diff.html @@ -0,0 +1,1560 @@ + + + + + + + sqlglot.diff API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

Semantic Diff for SQL

+ +

by Iaroslav Zeigerman

+ +

Motivation

+ +

Software is constantly changing and evolving, and identifying what has changed and reviewing those changes is an integral part of the development process. SQL code is no exception to this.

+ +

Text-based diff tools such as git diff, when applied to a code base, have certain limitations. First, they can only detect insertions and deletions, not movements or updates of individual pieces of code. Second, such tools can only detect changes between lines of text, which is too coarse for something as granular and detailed as source code. Additionally, the outcome of such a diff is dependent on the underlying code formatting, and yields different results if the formatting should change.

+ +

Consider the following diff generated by Git:

+ +

Git diff output

+ +

Semantically the query hasn’t changed. The two arguments b and c have been swapped (moved), posing no impact on the output of the query. Yet Git replaced the whole affected expression alongside a bulk of unrelated elements.

+ +

The alternative to text-based diffing is to compare Abstract Syntax Trees (AST) instead. The main advantage of ASTs are that they are a direct product of code parsing, which represents the underlying code structure at any desired level of granularity. Comparing ASTs may yield extremely precise diffs; changes such as code movements and updates can also be detected. Even more importantly, this approach facilitates additional use cases beyond eyeballing two versions of source code side by side.

+ +

The use cases I had in mind for SQL when I decided to embark on this journey of semantic diffing were the following:

+ +
    +
  • Query similarity score. Identifying which parts the two queries have in common to automatically suggest opportunities for consolidation, creation of intermediate/staging tables, and so on.
  • +
  • Differentiating between cosmetic / structural changes and functional ones. For example when a nested query is refactored into a common table expression (CTE), this kind of change doesn’t have any functional impact on either a query or its outcome.
  • +
  • Automatic suggestions about the need to retroactively backfill data. This is especially important for pipelines that populate very large tables for which restatement is a runtime-intensive procedure. The ability to discern between simple code movements and actual modifications can help assess the impact of a change and make suggestions accordingly.
  • +
+ +

The implementation discussed in this post is now a part of the SQLGlot library. You can find a complete source code in the diff.py module. The choice of SQLglot was an obvious one due to its simple but powerful API, lack of external dependencies and, more importantly, extensive list of supported SQL dialects.

+ +

The Search for a Solution

+ +

When it comes to any diffing tool (not just a semantic one), the primary challenge is to match as many elements of compared entities as possible. Once such a set of matching elements is available, deriving a sequence of changes becomes an easy task.

+ +

If our elements have unique identifiers associated with them (for example, an element’s ID in DOM), the matching problem is trivial. However, the SQL syntax trees that we are comparing have neither unique keys nor object identifiers that can be used for the purposes of matching. So, how do we suppose to find pairs of nodes that are related?

+ +

To better illustrate the problem, consider comparing the following SQL expressions: SELECT a + b + c, d, e and SELECT a - b + c, e, f. Matching individual nodes from respective syntax trees can be visualized as follows:

+ +

Figure 1: Example of node matching for two SQL expression trees +Figure 1: Example of node matching for two SQL expression trees.

+ +

By looking at the figure of node matching for two SQL expression trees above, we conclude that the following changes should be captured by our solution:

+ +
    +
  • Inserted nodes: Sub and f. These are the nodes from the target AST which do not have a matching node in the source AST.
  • +
  • Removed nodes: Add and d. These are the nodes from the source AST which do not have a counterpart in the target AST.
  • +
  • Remaining nodes must be identified as unchanged.
  • +
+ +

It should be clear at this point that if we manage to match nodes in the source tree with their counterparts in the target tree, then computing the diff becomes a trivial matter.

+ +

Naïve Brute-Force

+ +

The naïve solution would be to try all different permutations of node pair combinations, and see which set of pairs performs the best based on some type of heuristics. The runtime cost of such a solution quickly reaches the escape velocity; if both trees had only 10 nodes each, the number of such sets would approximately be 10! ^ 2 = 3.6M ^ 2 ~= 13 * 10^12. This is a very bad case of factorial complexity (to be precise, it’s actually much worse - O(n! ^ 2) - but I couldn’t come up with a name for it), so there is little need to explore this approach any further.

+ +

Myers Algorithm

+ +

After the naïve approach was proven to be infeasible, the next question I asked myself was “how does git diff work?”. This question led me to discover the Myers diff algorithm [1]. This algorithm has been designed to compare sequences of strings. At its core, it’s looking for the shortest path on a graph of possible edits that transform the first sequence into the second one, while heavily rewarding those paths that lead to longest subsequences of unchanged elements. There’s a lot of material out there describing this algorithm in greater detail. I found James Coglan’s series of blog posts to be the most comprehensive.

+ +

Therefore, I had this “brilliant” (actually not) idea to transform trees into sequences by traversing them in topological order, and then applying the Myers algorithm on resulting sequences while using a custom heuristics when checking the equality of two nodes. Unsurprisingly, comparing sequences of strings is quite different from comparing hierarchical tree structures, and by flattening trees into sequences, we lose a lot of relevant context. This resulted in a terrible performance of this algorithm on ASTs. It often matched completely unrelated nodes, even when the two trees were mostly the same, and produced extremely inaccurate lists of changes overall. After playing around with it a little and tweaking my equality heuristics to improve accuracy, I ultimately scrapped the whole implementation and went back to the drawing board.

+ +

Change Distiller

+ +

The algorithm I settled on at the end was Change Distiller, created by Fluri et al. [2], which in turn is an improvement over the core idea described by Chawathe et al. [3].

+ +

The algorithm consists of two high-level steps:

+ +
    +
  1. Finding appropriate matchings between pairs of nodes that are part of compared ASTs. Identifying what is meant by “appropriate” matching is also a part of this step.
  2. +
  3. Generating the so-called “edit script” from the matching set built in the 1st step. The edit script is a sequence of edit operations (for example, insert, remove, update, etc.) on individual tree nodes, such that when applied as transformations on the source AST, it eventually becomes the target AST. In general, the shorter the sequence, the better. The length of the edit script can be used to compare the performance of different algorithms, though this is not the only metric that matters.
  4. +
+ +

The rest of this section is dedicated to the Python implementation of the steps above using the AST implementation provided by the SQLGlot library.

+ +

Building the Matching Set

+ +

Matching Leaves

+ +

We begin composing the matching set by matching the leaf nodes. Leaf nodes are the nodes that do not have any children nodes (such as literals, identifiers, etc.). In order to match them, we gather all the leaf nodes from the source tree and generate a cartesian product with all the leaves from the target tree, while comparing pairs created this way and assigning them a similarity score. During this stage, we also exclude pairs that don’t pass basic matching criteria. Then, we pick pairs that scored the highest while making sure that each node is matched no more than once.

+ +

Using the example provided at the beginning of the post, the process of building an initial set of candidate matchings can be seen on Figure 2.

+ +

Figure 2: Building a set of candidate matchings between leaf nodes. The third item in each triplet represents a similarity score between two nodes. +Figure 2: Building a set of candidate matchings between leaf nodes. The third item in each triplet represents a similarity score between two nodes.

+ +

First, let’s analyze the similarity score. Then, we’ll discuss matching criteria.

+ +

The similarity score proposed by Fluri et al. [2] is a dice coefficient applied to bigrams of respective node values. A bigram is a sequence of two adjacent elements from a string computed in a sliding window fashion:

+ +
+
def bigram(string):
+    count = max(0, len(string) - 1)
+    return [string[i : i + 2] for i in range(count)]
+
+
+ +

For reasons that will become clear shortly, we actually need to compute bigram histograms rather than just sequences:

+ +
+
from collections import defaultdict
+
+def bigram_histo(string):
+    count = max(0, len(string) - 1)
+    bigram_histo = defaultdict(int)
+    for i in range(count):
+        bigram_histo[string[i : i + 2]] += 1
+    return bigram_histo
+
+
+ +

The dice coefficient formula looks like following:

+ +

Dice Coefficient

+ +

Where X is a bigram of the source node and Y is a bigram of the second one. What this essentially does is count the number of bigram elements the two nodes have in common, multiply it by 2, and then divide by the total number of elements in both bigrams. This is where bigram histograms come in handy:

+ +
+
def dice_coefficient(source, target):
+    source_histo = bigram_histo(source.sql())
+    target_histo = bigram_histo(target.sql())
+
+    total_grams = (
+        sum(source_histo.values()) + sum(target_histo.values())
+    )
+    if not total_grams:
+        return 1.0 if source == target else 0.0
+
+    overlap_len = 0
+    overlapping_grams = set(source_histo) & set(target_histo)
+    for g in overlapping_grams:
+        overlap_len += min(source_histo[g], target_histo[g])
+
+    return 2 * overlap_len / total_grams
+
+
+ +

To compute a bigram given a tree node, we first transform the node into its canonical SQL representation,so that the Literal(123) node becomes just “123” and the Identifier(“a”) node becomes just “a”. We also handle a scenario when strings are too short to derive bigrams. In this case, we fallback to checking the two nodes for equality.

+ +

Now when we know how to compute the similarity score, we can take care of the matching criteria for leaf nodes. In the original paper [2], the matching criteria is formalized as follows:

+ +

Matching criteria for leaf nodes

+ +

The two nodes are matched if two conditions are met:

+ +
    +
  1. The node labels match (in our case labels are just node types).
  2. +
  3. The similarity score for node values is greater than or equal to some threshold “f”. The authors of the paper recommend setting the value of “f” to 0.6.
  4. +
+ +

With building blocks in place, we can now build a matching set for leaf nodes. First, we generate a list of candidates for matching:

+ +
+
from heapq import heappush, heappop
+
+candidate_matchings = []
+source_leaves = _get_leaves(self._source)
+target_leaves = _get_leaves(self._target)
+for source_leaf in source_leaves:
+    for target_leaf in target_leaves:
+        if _is_same_type(source_leaf, target_leaf):
+            similarity_score = dice_coefficient(
+                source_leaf, target_leaf
+            )
+            if similarity_score >= 0.6:
+                heappush(
+                    candidate_matchings,
+                    (
+                        -similarity_score,
+                        len(candidate_matchings),
+                        source_leaf,
+                        target_leaf,
+                    ),
+                )
+
+
+ +

In the implementation above, we push each matching pair onto the heap to automatically maintain the correct order based on the assigned similarity score.

+ +

Finally, we build the initial matching set by picking leaf pairs with the highest score:

+ +
+
matching_set = set()
+while candidate_matchings:
+    _, _, source_leaf, target_leaf = heappop(candidate_matchings)
+    if (
+        source_leaf in unmatched_source_nodes
+        and target_leaf in unmatched_target_nodes
+    ):
+        matching_set.add((source_leaf, target_leaf))
+        unmatched_source_nodes.remove(source_leaf)
+        unmatched_target_nodes.remove(target_leaf)
+
+
+ +

To finalize the matching set, we should now proceed with matching inner nodes.

+ +

Matching Inner Nodes

+ +

Matching inner nodes is quite similar to matching leaf nodes, with the following two distinctions:

+ +
    +
  • Rather than ranking a set of possible candidates, we pick the first node pair that passes the matching criteria.
  • +
  • The matching criteria itself has been extended to account for the number of leaf nodes the pair of inner nodes have in common.
  • +
+ +

Figure 3: Matching inner nodes based on their type as well as how many of their leaf nodes have been previously matched. +Figure 3: Matching inner nodes based on their type as well as how many of their leaf nodes have been previously matched.

+ +

Let’s start with the matching criteria. The criteria is formalized as follows:

+ +

Matching criteria for inner nodes

+ +

Alongside already familiar similarity score and node type criteria, there is a new one in the middle: the ratio of leaf nodes that the two nodes have in common must exceed some threshold “t”. The recommended value for “t” is also 0.6. Counting the number of common leaf nodes is pretty straightforward, since we already have the complete matching set for leaves. All we need to do is count how many matching pairs the leaf nodes from the two compared inner nodes form.

+ +

There are two additional heuristics associated with this matching criteria:

+ +
    +
  • Inner node similarity weighting: if the similarity score between the node values doesn’t pass the threshold “f” but the ratio of common leaf nodes (“t”) is greater than or equal to 0.8, then the matching is considered successful.
  • +
  • The threshold “t” is reduced to 0.4 for inner nodes with the number of leaf nodes equal to 4 or less, in order to decrease the false negative rate for small subtrees.
  • +
+ +

We now only have to iterate through the remaining unmatched nodes and form matching pairs based on the outlined criteria:

+ +
+
leaves_matching_set = matching_set.copy()
+
+for source_node in unmatched_source_nodes.copy():
+    for target_node in unmatched_target_nodes:
+        if _is_same_type(source_node, target_node):
+            source_leaves = set(_get_leaves(source_node))
+            target_leaves = set(_get_leaves(target_node))
+
+            max_leaves_num = max(len(source_leaves), len(target_leaves))
+            if max_leaves_num:
+                common_leaves_num = sum(
+                    1 if s in source_leaves and t in target_leaves else 0
+                    for s, t in leaves_matching_set
+                )
+                leaf_similarity_score = common_leaves_num / max_leaves_num
+            else:
+                leaf_similarity_score = 0.0
+
+            adjusted_t = (
+                0.6
+                if min(len(source_leaves), len(target_leaves)) > 4
+                else 0.4
+            )
+
+            if leaf_similarity_score >= 0.8 or (
+                leaf_similarity_score >= adjusted_t
+                and dice_coefficient(source_node, target_node) >= 0.6
+            ):
+                matching_set.add((source_node, target_node))
+                unmatched_source_nodes.remove(source_node)
+                unmatched_target_nodes.remove(target_node)
+                break
+
+
+ +

After the matching set is formed, we can proceed with generation of the edit script, which will be the algorithm’s output.

+ +

Generating the Edit Script

+ +

At this point, we should have the following 3 sets at our disposal:

+ +
    +
  • The set of matched node pairs.
  • +
  • The set of remaining unmatched nodes from the source tree.
  • +
  • The set of remaining unmatched nodes from the target tree.
  • +
+ +

We can derive 3 kinds of edits from the matching set: either the node’s value was updated (Update), the node was moved to a different position within the tree (Move), or the node remained unchanged (Keep). Note that the Move case is not mutually exclusive with the other two. The node could have been updated or could have remained the same while at the same time its position within its parent node or the parent node itself could have changed. All unmatched nodes from the source tree are the ones that were removed (Remove), while unmatched nodes from the target tree are the ones that were inserted (Insert).

+ +

The latter two cases are pretty straightforward to implement:

+ +
+
edit_script = []
+
+for removed_node in unmatched_source_nodes:
+    edit_script.append(Remove(removed_node))
+for inserted_node in unmatched_target_nodes:
+    edit_script.append(Insert(inserted_node))
+
+
+ +

Traversing the matching set requires a little more thought:

+ +
+
for source_node, target_node in matching_set:
+    if (
+        not isinstance(source_node, LEAF_EXPRESSION_TYPES)
+        or source_node == target_node
+    ):
+        move_edits = generate_move_edits(
+            source_node, target_node, matching_set
+        )
+        edit_script.extend(move_edits)
+        edit_script.append(Keep(source_node, target_node))
+    else:
+        edit_script.append(Update(source_node, target_node))
+
+
+ +

If a matching pair represents a pair of leaf nodes, we check if they are the same to decide whether an update took place. For inner node pairs, we also need to compare the positions of their respective children to detect node movements. Chawathe et al. [3] suggest applying the longest common subsequence (LCS) algorithm which, no surprise here, was described by Myers himself [1]. There is a small catch, however: instead of checking the equality of two children nodes, we need to check whether the two nodes form a pair that is a part of our matching set.

+ +

Now with this knowledge, the implementation becomes straightforward:

+ +
+
def generate_move_edits(source, target, matching_set):
+    source_children = _get_child_nodes(source)
+    target_children = _get_child_nodes(target)
+
+    lcs = set(
+        _longest_common_subsequence(
+            source_children,
+            target_children,
+            lambda l, r: (l, r) in matching_set
+        )
+    )
+
+    move_edits = []
+    for node in source_children:
+        if node not in lcs and node not in unmatched_source_nodes:
+            move_edits.append(Move(node))
+
+    return move_edits
+
+
+ +

I left out the implementation of the LCS algorithm itself here, but there are plenty of implementation choices out there that can be easily looked up.

+ +

Output

+ +

The implemented algorithm produces the output that resembles the following:

+ +
+
>>> from sqlglot import parse_one, diff
+>>> diff(parse_one("SELECT a + b + c, d, e"), parse_one("SELECT a - b + c, e, f"))
+
+Remove(Add)
+Remove(Column(d))
+Remove(Identifier(d))
+Insert(Sub)
+Insert(Column(f))
+Insert(Identifier(f))
+Keep(Select, Select)
+Keep(Add, Add)
+Keep(Column(a), Column(a))
+Keep(Identifier(a), Identifier(a))
+Keep(Column(b), Column(b))
+Keep(Identifier(b), Identifier(b))
+Keep(Column(c), Column(c))
+Keep(Identifier(c), Identifier(c))
+Keep(Column(e), Column(e))
+Keep(Identifier(e), Identifier(e))
+
+
+ +

Note that the output above is abbreviated. The string representation of actual AST nodes is significantly more verbose.

+ +

The implementation works especially well when coupled with the SQLGlot’s query optimizer which can be used to produce canonical representations of compared queries:

+ +
+
>>> schema={"t": {"a": "INT", "b": "INT", "c": "INT", "d": "INT"}}
+>>> source = """
+... SELECT 1 + 1 + a
+... FROM t
+... WHERE b = 1 OR (c = 2 AND d = 3)
+... """
+>>> target = """
+... SELECT 2 + a
+... FROM t
+... WHERE (b = 1 OR c = 2) AND (b = 1 OR d = 3)
+... """
+>>> optimized_source = optimize(parse_one(source), schema=schema)
+>>> optimized_target = optimize(parse_one(target), schema=schema)
+>>> edit_script = diff(optimized_source, optimized_target)
+>>> sum(0 if isinstance(e, Keep) else 1 for e in edit_script)
+0
+
+
+ +

Optimizations

+ +

The worst case runtime complexity of this algorithm is not exactly stellar: O(n^2 * log n^2). This is because of the leaf matching process, which involves ranking a cartesian product between all leaf nodes of compared trees. Unsurprisingly, the algorithm takes a considerable time to finish for bigger queries.

+ +

There are still a few basic things we can do in our implementation to help improve performance:

+ +
    +
  • Refer to individual node objects using their identifiers (Python’s id()) instead of direct references in sets. This helps avoid costly recursive hash calculations and equality checks.
  • +
  • Cache bigram histograms to avoid computing them more than once for the same node.
  • +
  • Compute the canonical SQL string representation for each tree once while caching string representations of all inner nodes. This prevents redundant tree traversals when bigrams are computed.
  • +
+ +

At the time of writing only the first two optimizations have been implemented, so there is an opportunity to contribute for anyone who’s interested.

+ +

Alternative Solutions

+ +

This section is dedicated to solutions that I’ve investigated, but haven’t tried.

+ +

First, this section wouldn’t be complete without Tristan Hume’s blog post. Tristan’s solution has a lot in common with the Myers algorithm plus heuristics that are much more clever than what I came up with. The implementation relies on a combination of dynamic programming and A* search algorithm to explore the space of possible matchings and pick the best ones. It seemed to have worked well for Tristan’s specific use case, but after my negative experience with the Myers algorithm, I decided to try something different.

+ +

Another notable approach is the Gumtree algorithm by Falleri et al. [4]. I discovered this paper after I’d already implemented the algorithm that is the main focus of this post. In sections 5.2 and 5.3 of their paper, the authors compare the two algorithms side by side and claim that Gumtree is significantly better in terms of both runtime performance and accuracy when evaluated on 12 792 pairs of Java source files. This doesn’t surprise me, as the algorithm takes the height of subtrees into account. In my tests, I definitely saw scenarios in which this context would have helped. On top of that, the authors promise O(n^2) runtime complexity in the worst case which, given the Change Distiller's O(n^2 * log n^2), looks particularly tempting. I hope to try this algorithm out at some point, and there is a good chance you'll see me writing about it in my future posts.

+ +

Conclusion

+ +

The Change Distiller algorithm yielded quite satisfactory results in most of my tests. The scenarios in which it fell short mostly concerned identical (or very similar) subtrees located in different parts of the AST. In those cases, node mismatches were frequent and, as a result, edit scripts were somewhat suboptimal.

+ +

Additionally, the runtime performance of the algorithm leaves a lot to be desired. On trees with 1000 leaf nodes each, the algorithm takes a little under 2 seconds to complete. My implementation still has room for improvement, but this should give you a rough idea of what to expect. It appears that the Gumtree algorithm [4] can help address both of these points. I hope to find bandwidth to work on it soon and then compare the two algorithms side-by-side to find out which one performs better on SQL specifically. In the meantime, Change Distiller definitely gets the job done, and I can now proceed with applying it to some of the use cases I mentioned at the beginning of this post.

+ +

I’m also curious to learn whether other folks in the industry faced a similar problem, and how they approached it. If you did something similar, I’m interested to hear about your experience.

+ +

References

+ +

[1] Eugene W. Myers. An O(ND) Difference Algorithm and Its Variations. Algorithmica 1(2): 251-266 (1986)

+ +

[2] B. Fluri, M. Wursch, M. Pinzger, and H. Gall. Change Distilling: Tree differencing for fine-grained source code change extraction. IEEE Trans. Software Eng., 33(11):725–743, 2007.

+ +

[3] S.S. Chawathe, A. Rajaraman, H. Garcia-Molina, and J. Widom. Change Detection in Hierarchically Structured Information. Proc. ACM Sigmod Int’l Conf. Management of Data, pp. 493-504, June 1996

+ +

[4] Jean-Rémy Falleri, Floréal Morandat, Xavier Blanc, Matias Martinez, Martin Monperrus. Fine-grained and Accurate Source Code Differencing. Proceedings of the International Conference on Automated Software Engineering, 2014, Västeras, Sweden. pp.313-324, 10.1145/2642937.2642982. hal-01054552

+ +
+
+ + + + + +
  1"""
+  2.. include:: ../posts/sql_diff.md
+  3
+  4----
+  5"""
+  6
+  7from __future__ import annotations
+  8
+  9import typing as t
+ 10from collections import defaultdict
+ 11from dataclasses import dataclass
+ 12from heapq import heappop, heappush
+ 13
+ 14from sqlglot import Dialect
+ 15from sqlglot import expressions as exp
+ 16from sqlglot.helper import ensure_collection
+ 17
+ 18
+ 19@dataclass(frozen=True)
+ 20class Insert:
+ 21    """Indicates that a new node has been inserted"""
+ 22
+ 23    expression: exp.Expression
+ 24
+ 25
+ 26@dataclass(frozen=True)
+ 27class Remove:
+ 28    """Indicates that an existing node has been removed"""
+ 29
+ 30    expression: exp.Expression
+ 31
+ 32
+ 33@dataclass(frozen=True)
+ 34class Move:
+ 35    """Indicates that an existing node's position within the tree has changed"""
+ 36
+ 37    expression: exp.Expression
+ 38
+ 39
+ 40@dataclass(frozen=True)
+ 41class Update:
+ 42    """Indicates that an existing node has been updated"""
+ 43
+ 44    source: exp.Expression
+ 45    target: exp.Expression
+ 46
+ 47
+ 48@dataclass(frozen=True)
+ 49class Keep:
+ 50    """Indicates that an existing node hasn't been changed"""
+ 51
+ 52    source: exp.Expression
+ 53    target: exp.Expression
+ 54
+ 55
+ 56if t.TYPE_CHECKING:
+ 57    T = t.TypeVar("T")
+ 58    Edit = t.Union[Insert, Remove, Move, Update, Keep]
+ 59
+ 60
+ 61def diff(source: exp.Expression, target: exp.Expression) -> t.List[Edit]:
+ 62    """
+ 63    Returns the list of changes between the source and the target expressions.
+ 64
+ 65    Examples:
+ 66        >>> diff(parse_one("a + b"), parse_one("a + c"))
+ 67        [
+ 68            Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),
+ 69            Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),
+ 70            Keep(
+ 71                source=(ADD this: ...),
+ 72                target=(ADD this: ...)
+ 73            ),
+ 74            Keep(
+ 75                source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),
+ 76                target=(COLUMN this: (IDENTIFIER this: a, quoted: False))
+ 77            ),
+ 78        ]
+ 79
+ 80    Args:
+ 81        source: the source expression.
+ 82        target: the target expression against which the diff should be calculated.
+ 83
+ 84    Returns:
+ 85        the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the
+ 86        target expression trees. This list represents a sequence of steps needed to transform the source
+ 87        expression tree into the target one.
+ 88    """
+ 89    return ChangeDistiller().diff(source.copy(), target.copy())
+ 90
+ 91
+ 92LEAF_EXPRESSION_TYPES = (
+ 93    exp.Boolean,
+ 94    exp.DataType,
+ 95    exp.Identifier,
+ 96    exp.Literal,
+ 97)
+ 98
+ 99
+100class ChangeDistiller:
+101    """
+102    The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in
+103    their paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by
+104    Chawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.
+105    """
+106
+107    def __init__(self, f: float = 0.6, t: float = 0.6) -> None:
+108        self.f = f
+109        self.t = t
+110        self._sql_generator = Dialect().generator()
+111
+112    def diff(self, source: exp.Expression, target: exp.Expression) -> t.List[Edit]:
+113        self._source = source
+114        self._target = target
+115        self._source_index = {id(n[0]): n[0] for n in source.bfs()}
+116        self._target_index = {id(n[0]): n[0] for n in target.bfs()}
+117        self._unmatched_source_nodes = set(self._source_index)
+118        self._unmatched_target_nodes = set(self._target_index)
+119        self._bigram_histo_cache: t.Dict[int, t.DefaultDict[str, int]] = {}
+120
+121        matching_set = self._compute_matching_set()
+122        return self._generate_edit_script(matching_set)
+123
+124    def _generate_edit_script(self, matching_set: t.Set[t.Tuple[int, int]]) -> t.List[Edit]:
+125        edit_script: t.List[Edit] = []
+126        for removed_node_id in self._unmatched_source_nodes:
+127            edit_script.append(Remove(self._source_index[removed_node_id]))
+128        for inserted_node_id in self._unmatched_target_nodes:
+129            edit_script.append(Insert(self._target_index[inserted_node_id]))
+130        for kept_source_node_id, kept_target_node_id in matching_set:
+131            source_node = self._source_index[kept_source_node_id]
+132            target_node = self._target_index[kept_target_node_id]
+133            if not isinstance(source_node, LEAF_EXPRESSION_TYPES) or source_node == target_node:
+134                edit_script.extend(
+135                    self._generate_move_edits(source_node, target_node, matching_set)
+136                )
+137                edit_script.append(Keep(source_node, target_node))
+138            else:
+139                edit_script.append(Update(source_node, target_node))
+140
+141        return edit_script
+142
+143    def _generate_move_edits(
+144        self, source: exp.Expression, target: exp.Expression, matching_set: t.Set[t.Tuple[int, int]]
+145    ) -> t.List[Move]:
+146        source_args = [id(e) for e in _expression_only_args(source)]
+147        target_args = [id(e) for e in _expression_only_args(target)]
+148
+149        args_lcs = set(_lcs(source_args, target_args, lambda l, r: (l, r) in matching_set))
+150
+151        move_edits = []
+152        for a in source_args:
+153            if a not in args_lcs and a not in self._unmatched_source_nodes:
+154                move_edits.append(Move(self._source_index[a]))
+155
+156        return move_edits
+157
+158    def _compute_matching_set(self) -> t.Set[t.Tuple[int, int]]:
+159        leaves_matching_set = self._compute_leaf_matching_set()
+160        matching_set = leaves_matching_set.copy()
+161
+162        ordered_unmatched_source_nodes = {
+163            id(n[0]): None for n in self._source.bfs() if id(n[0]) in self._unmatched_source_nodes
+164        }
+165        ordered_unmatched_target_nodes = {
+166            id(n[0]): None for n in self._target.bfs() if id(n[0]) in self._unmatched_target_nodes
+167        }
+168
+169        for source_node_id in ordered_unmatched_source_nodes:
+170            for target_node_id in ordered_unmatched_target_nodes:
+171                source_node = self._source_index[source_node_id]
+172                target_node = self._target_index[target_node_id]
+173                if _is_same_type(source_node, target_node):
+174                    source_leaf_ids = {id(l) for l in _get_leaves(source_node)}
+175                    target_leaf_ids = {id(l) for l in _get_leaves(target_node)}
+176
+177                    max_leaves_num = max(len(source_leaf_ids), len(target_leaf_ids))
+178                    if max_leaves_num:
+179                        common_leaves_num = sum(
+180                            1 if s in source_leaf_ids and t in target_leaf_ids else 0
+181                            for s, t in leaves_matching_set
+182                        )
+183                        leaf_similarity_score = common_leaves_num / max_leaves_num
+184                    else:
+185                        leaf_similarity_score = 0.0
+186
+187                    adjusted_t = (
+188                        self.t if min(len(source_leaf_ids), len(target_leaf_ids)) > 4 else 0.4
+189                    )
+190
+191                    if leaf_similarity_score >= 0.8 or (
+192                        leaf_similarity_score >= adjusted_t
+193                        and self._dice_coefficient(source_node, target_node) >= self.f
+194                    ):
+195                        matching_set.add((source_node_id, target_node_id))
+196                        self._unmatched_source_nodes.remove(source_node_id)
+197                        self._unmatched_target_nodes.remove(target_node_id)
+198                        ordered_unmatched_target_nodes.pop(target_node_id, None)
+199                        break
+200
+201        return matching_set
+202
+203    def _compute_leaf_matching_set(self) -> t.Set[t.Tuple[int, int]]:
+204        candidate_matchings: t.List[t.Tuple[float, int, exp.Expression, exp.Expression]] = []
+205        source_leaves = list(_get_leaves(self._source))
+206        target_leaves = list(_get_leaves(self._target))
+207        for source_leaf in source_leaves:
+208            for target_leaf in target_leaves:
+209                if _is_same_type(source_leaf, target_leaf):
+210                    similarity_score = self._dice_coefficient(source_leaf, target_leaf)
+211                    if similarity_score >= self.f:
+212                        heappush(
+213                            candidate_matchings,
+214                            (
+215                                -similarity_score,
+216                                len(candidate_matchings),
+217                                source_leaf,
+218                                target_leaf,
+219                            ),
+220                        )
+221
+222        # Pick best matchings based on the highest score
+223        matching_set = set()
+224        while candidate_matchings:
+225            _, _, source_leaf, target_leaf = heappop(candidate_matchings)
+226            if (
+227                id(source_leaf) in self._unmatched_source_nodes
+228                and id(target_leaf) in self._unmatched_target_nodes
+229            ):
+230                matching_set.add((id(source_leaf), id(target_leaf)))
+231                self._unmatched_source_nodes.remove(id(source_leaf))
+232                self._unmatched_target_nodes.remove(id(target_leaf))
+233
+234        return matching_set
+235
+236    def _dice_coefficient(self, source: exp.Expression, target: exp.Expression) -> float:
+237        source_histo = self._bigram_histo(source)
+238        target_histo = self._bigram_histo(target)
+239
+240        total_grams = sum(source_histo.values()) + sum(target_histo.values())
+241        if not total_grams:
+242            return 1.0 if source == target else 0.0
+243
+244        overlap_len = 0
+245        overlapping_grams = set(source_histo) & set(target_histo)
+246        for g in overlapping_grams:
+247            overlap_len += min(source_histo[g], target_histo[g])
+248
+249        return 2 * overlap_len / total_grams
+250
+251    def _bigram_histo(self, expression: exp.Expression) -> t.DefaultDict[str, int]:
+252        if id(expression) in self._bigram_histo_cache:
+253            return self._bigram_histo_cache[id(expression)]
+254
+255        expression_str = self._sql_generator.generate(expression)
+256        count = max(0, len(expression_str) - 1)
+257        bigram_histo: t.DefaultDict[str, int] = defaultdict(int)
+258        for i in range(count):
+259            bigram_histo[expression_str[i : i + 2]] += 1
+260
+261        self._bigram_histo_cache[id(expression)] = bigram_histo
+262        return bigram_histo
+263
+264
+265def _get_leaves(expression: exp.Expression) -> t.Iterator[exp.Expression]:
+266    has_child_exprs = False
+267
+268    for a in expression.args.values():
+269        for node in ensure_collection(a):
+270            if isinstance(node, exp.Expression):
+271                has_child_exprs = True
+272                yield from _get_leaves(node)
+273
+274    if not has_child_exprs:
+275        yield expression
+276
+277
+278def _is_same_type(source: exp.Expression, target: exp.Expression) -> bool:
+279    if type(source) is type(target):
+280        if isinstance(source, exp.Join):
+281            return source.args.get("side") == target.args.get("side")
+282
+283        if isinstance(source, exp.Anonymous):
+284            return source.this == target.this
+285
+286        return True
+287
+288    return False
+289
+290
+291def _expression_only_args(expression: exp.Expression) -> t.List[exp.Expression]:
+292    args: t.List[t.Union[exp.Expression, t.List]] = []
+293    if expression:
+294        for a in expression.args.values():
+295            args.extend(ensure_collection(a))
+296    return [a for a in args if isinstance(a, exp.Expression)]
+297
+298
+299def _lcs(
+300    seq_a: t.Sequence[T], seq_b: t.Sequence[T], equal: t.Callable[[T, T], bool]
+301) -> t.Sequence[t.Optional[T]]:
+302    """Calculates the longest common subsequence"""
+303
+304    len_a = len(seq_a)
+305    len_b = len(seq_b)
+306    lcs_result = [[None] * (len_b + 1) for i in range(len_a + 1)]
+307
+308    for i in range(len_a + 1):
+309        for j in range(len_b + 1):
+310            if i == 0 or j == 0:
+311                lcs_result[i][j] = []  # type: ignore
+312            elif equal(seq_a[i - 1], seq_b[j - 1]):
+313                lcs_result[i][j] = lcs_result[i - 1][j - 1] + [seq_a[i - 1]]  # type: ignore
+314            else:
+315                lcs_result[i][j] = (
+316                    lcs_result[i - 1][j]
+317                    if len(lcs_result[i - 1][j]) > len(lcs_result[i][j - 1])  # type: ignore
+318                    else lcs_result[i][j - 1]
+319                )
+320
+321    return lcs_result[len_a][len_b]  # type: ignore
+
+ + +
+
+ +
+
@dataclass(frozen=True)
+ + class + Insert: + + + +
+ +
20@dataclass(frozen=True)
+21class Insert:
+22    """Indicates that a new node has been inserted"""
+23
+24    expression: exp.Expression
+
+ + +

Indicates that a new node has been inserted

+
+ + +
+
+ + Insert(expression: sqlglot.expressions.Expression) + + +
+ + + + +
+
+
+ +
+
@dataclass(frozen=True)
+ + class + Remove: + + + +
+ +
27@dataclass(frozen=True)
+28class Remove:
+29    """Indicates that an existing node has been removed"""
+30
+31    expression: exp.Expression
+
+ + +

Indicates that an existing node has been removed

+
+ + +
+
+ + Remove(expression: sqlglot.expressions.Expression) + + +
+ + + + +
+
+
+ +
+
@dataclass(frozen=True)
+ + class + Move: + + + +
+ +
34@dataclass(frozen=True)
+35class Move:
+36    """Indicates that an existing node's position within the tree has changed"""
+37
+38    expression: exp.Expression
+
+ + +

Indicates that an existing node's position within the tree has changed

+
+ + +
+
+ + Move(expression: sqlglot.expressions.Expression) + + +
+ + + + +
+
+
+ +
+
@dataclass(frozen=True)
+ + class + Update: + + + +
+ +
41@dataclass(frozen=True)
+42class Update:
+43    """Indicates that an existing node has been updated"""
+44
+45    source: exp.Expression
+46    target: exp.Expression
+
+ + +

Indicates that an existing node has been updated

+
+ + +
+ + + + + +
+
+
+ +
+
@dataclass(frozen=True)
+ + class + Keep: + + + +
+ +
49@dataclass(frozen=True)
+50class Keep:
+51    """Indicates that an existing node hasn't been changed"""
+52
+53    source: exp.Expression
+54    target: exp.Expression
+
+ + +

Indicates that an existing node hasn't been changed

+
+ + +
+ + + + + +
+
+
+ + + +
62def diff(source: exp.Expression, target: exp.Expression) -> t.List[Edit]:
+63    """
+64    Returns the list of changes between the source and the target expressions.
+65
+66    Examples:
+67        >>> diff(parse_one("a + b"), parse_one("a + c"))
+68        [
+69            Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),
+70            Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),
+71            Keep(
+72                source=(ADD this: ...),
+73                target=(ADD this: ...)
+74            ),
+75            Keep(
+76                source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),
+77                target=(COLUMN this: (IDENTIFIER this: a, quoted: False))
+78            ),
+79        ]
+80
+81    Args:
+82        source: the source expression.
+83        target: the target expression against which the diff should be calculated.
+84
+85    Returns:
+86        the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the
+87        target expression trees. This list represents a sequence of steps needed to transform the source
+88        expression tree into the target one.
+89    """
+90    return ChangeDistiller().diff(source.copy(), target.copy())
+
+ + +

Returns the list of changes between the source and the target expressions.

+ +
Examples:
+ +
+
+
>>> diff(parse_one("a + b"), parse_one("a + c"))
+[
+    Remove(expression=(COLUMN this: (IDENTIFIER this: b, quoted: False))),
+    Insert(expression=(COLUMN this: (IDENTIFIER this: c, quoted: False))),
+    Keep(
+        source=(ADD this: ...),
+        target=(ADD this: ...)
+    ),
+    Keep(
+        source=(COLUMN this: (IDENTIFIER this: a, quoted: False)),
+        target=(COLUMN this: (IDENTIFIER this: a, quoted: False))
+    ),
+]
+
+
+
+ +
Arguments:
+ +
    +
  • source: the source expression.
  • +
  • target: the target expression against which the diff should be calculated.
  • +
+ +
Returns:
+ +
+

the list of Insert, Remove, Move, Update and Keep objects for each node in the source and the + target expression trees. This list represents a sequence of steps needed to transform the source + expression tree into the target one.

+
+
+ + +
+
+ +
+ + class + ChangeDistiller: + + + +
+ +
101class ChangeDistiller:
+102    """
+103    The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in
+104    their paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by
+105    Chawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.
+106    """
+107
+108    def __init__(self, f: float = 0.6, t: float = 0.6) -> None:
+109        self.f = f
+110        self.t = t
+111        self._sql_generator = Dialect().generator()
+112
+113    def diff(self, source: exp.Expression, target: exp.Expression) -> t.List[Edit]:
+114        self._source = source
+115        self._target = target
+116        self._source_index = {id(n[0]): n[0] for n in source.bfs()}
+117        self._target_index = {id(n[0]): n[0] for n in target.bfs()}
+118        self._unmatched_source_nodes = set(self._source_index)
+119        self._unmatched_target_nodes = set(self._target_index)
+120        self._bigram_histo_cache: t.Dict[int, t.DefaultDict[str, int]] = {}
+121
+122        matching_set = self._compute_matching_set()
+123        return self._generate_edit_script(matching_set)
+124
+125    def _generate_edit_script(self, matching_set: t.Set[t.Tuple[int, int]]) -> t.List[Edit]:
+126        edit_script: t.List[Edit] = []
+127        for removed_node_id in self._unmatched_source_nodes:
+128            edit_script.append(Remove(self._source_index[removed_node_id]))
+129        for inserted_node_id in self._unmatched_target_nodes:
+130            edit_script.append(Insert(self._target_index[inserted_node_id]))
+131        for kept_source_node_id, kept_target_node_id in matching_set:
+132            source_node = self._source_index[kept_source_node_id]
+133            target_node = self._target_index[kept_target_node_id]
+134            if not isinstance(source_node, LEAF_EXPRESSION_TYPES) or source_node == target_node:
+135                edit_script.extend(
+136                    self._generate_move_edits(source_node, target_node, matching_set)
+137                )
+138                edit_script.append(Keep(source_node, target_node))
+139            else:
+140                edit_script.append(Update(source_node, target_node))
+141
+142        return edit_script
+143
+144    def _generate_move_edits(
+145        self, source: exp.Expression, target: exp.Expression, matching_set: t.Set[t.Tuple[int, int]]
+146    ) -> t.List[Move]:
+147        source_args = [id(e) for e in _expression_only_args(source)]
+148        target_args = [id(e) for e in _expression_only_args(target)]
+149
+150        args_lcs = set(_lcs(source_args, target_args, lambda l, r: (l, r) in matching_set))
+151
+152        move_edits = []
+153        for a in source_args:
+154            if a not in args_lcs and a not in self._unmatched_source_nodes:
+155                move_edits.append(Move(self._source_index[a]))
+156
+157        return move_edits
+158
+159    def _compute_matching_set(self) -> t.Set[t.Tuple[int, int]]:
+160        leaves_matching_set = self._compute_leaf_matching_set()
+161        matching_set = leaves_matching_set.copy()
+162
+163        ordered_unmatched_source_nodes = {
+164            id(n[0]): None for n in self._source.bfs() if id(n[0]) in self._unmatched_source_nodes
+165        }
+166        ordered_unmatched_target_nodes = {
+167            id(n[0]): None for n in self._target.bfs() if id(n[0]) in self._unmatched_target_nodes
+168        }
+169
+170        for source_node_id in ordered_unmatched_source_nodes:
+171            for target_node_id in ordered_unmatched_target_nodes:
+172                source_node = self._source_index[source_node_id]
+173                target_node = self._target_index[target_node_id]
+174                if _is_same_type(source_node, target_node):
+175                    source_leaf_ids = {id(l) for l in _get_leaves(source_node)}
+176                    target_leaf_ids = {id(l) for l in _get_leaves(target_node)}
+177
+178                    max_leaves_num = max(len(source_leaf_ids), len(target_leaf_ids))
+179                    if max_leaves_num:
+180                        common_leaves_num = sum(
+181                            1 if s in source_leaf_ids and t in target_leaf_ids else 0
+182                            for s, t in leaves_matching_set
+183                        )
+184                        leaf_similarity_score = common_leaves_num / max_leaves_num
+185                    else:
+186                        leaf_similarity_score = 0.0
+187
+188                    adjusted_t = (
+189                        self.t if min(len(source_leaf_ids), len(target_leaf_ids)) > 4 else 0.4
+190                    )
+191
+192                    if leaf_similarity_score >= 0.8 or (
+193                        leaf_similarity_score >= adjusted_t
+194                        and self._dice_coefficient(source_node, target_node) >= self.f
+195                    ):
+196                        matching_set.add((source_node_id, target_node_id))
+197                        self._unmatched_source_nodes.remove(source_node_id)
+198                        self._unmatched_target_nodes.remove(target_node_id)
+199                        ordered_unmatched_target_nodes.pop(target_node_id, None)
+200                        break
+201
+202        return matching_set
+203
+204    def _compute_leaf_matching_set(self) -> t.Set[t.Tuple[int, int]]:
+205        candidate_matchings: t.List[t.Tuple[float, int, exp.Expression, exp.Expression]] = []
+206        source_leaves = list(_get_leaves(self._source))
+207        target_leaves = list(_get_leaves(self._target))
+208        for source_leaf in source_leaves:
+209            for target_leaf in target_leaves:
+210                if _is_same_type(source_leaf, target_leaf):
+211                    similarity_score = self._dice_coefficient(source_leaf, target_leaf)
+212                    if similarity_score >= self.f:
+213                        heappush(
+214                            candidate_matchings,
+215                            (
+216                                -similarity_score,
+217                                len(candidate_matchings),
+218                                source_leaf,
+219                                target_leaf,
+220                            ),
+221                        )
+222
+223        # Pick best matchings based on the highest score
+224        matching_set = set()
+225        while candidate_matchings:
+226            _, _, source_leaf, target_leaf = heappop(candidate_matchings)
+227            if (
+228                id(source_leaf) in self._unmatched_source_nodes
+229                and id(target_leaf) in self._unmatched_target_nodes
+230            ):
+231                matching_set.add((id(source_leaf), id(target_leaf)))
+232                self._unmatched_source_nodes.remove(id(source_leaf))
+233                self._unmatched_target_nodes.remove(id(target_leaf))
+234
+235        return matching_set
+236
+237    def _dice_coefficient(self, source: exp.Expression, target: exp.Expression) -> float:
+238        source_histo = self._bigram_histo(source)
+239        target_histo = self._bigram_histo(target)
+240
+241        total_grams = sum(source_histo.values()) + sum(target_histo.values())
+242        if not total_grams:
+243            return 1.0 if source == target else 0.0
+244
+245        overlap_len = 0
+246        overlapping_grams = set(source_histo) & set(target_histo)
+247        for g in overlapping_grams:
+248            overlap_len += min(source_histo[g], target_histo[g])
+249
+250        return 2 * overlap_len / total_grams
+251
+252    def _bigram_histo(self, expression: exp.Expression) -> t.DefaultDict[str, int]:
+253        if id(expression) in self._bigram_histo_cache:
+254            return self._bigram_histo_cache[id(expression)]
+255
+256        expression_str = self._sql_generator.generate(expression)
+257        count = max(0, len(expression_str) - 1)
+258        bigram_histo: t.DefaultDict[str, int] = defaultdict(int)
+259        for i in range(count):
+260            bigram_histo[expression_str[i : i + 2]] += 1
+261
+262        self._bigram_histo_cache[id(expression)] = bigram_histo
+263        return bigram_histo
+
+ + +

The implementation of the Change Distiller algorithm described by Beat Fluri and Martin Pinzger in +their paper https://ieeexplore.ieee.org/document/4339230, which in turn is based on the algorithm by +Chawathe et al. described in http://ilpubs.stanford.edu:8090/115/1/1995-46.pdf.

+
+ + +
+ +
+ + ChangeDistiller(f: float = 0.6, t: float = 0.6) + + + +
+ +
108    def __init__(self, f: float = 0.6, t: float = 0.6) -> None:
+109        self.f = f
+110        self.t = t
+111        self._sql_generator = Dialect().generator()
+
+ + + + +
+
+ + + +
113    def diff(self, source: exp.Expression, target: exp.Expression) -> t.List[Edit]:
+114        self._source = source
+115        self._target = target
+116        self._source_index = {id(n[0]): n[0] for n in source.bfs()}
+117        self._target_index = {id(n[0]): n[0] for n in target.bfs()}
+118        self._unmatched_source_nodes = set(self._source_index)
+119        self._unmatched_target_nodes = set(self._target_index)
+120        self._bigram_histo_cache: t.Dict[int, t.DefaultDict[str, int]] = {}
+121
+122        matching_set = self._compute_matching_set()
+123        return self._generate_edit_script(matching_set)
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/errors.html b/docs/sqlglot/errors.html new file mode 100644 index 0000000..534f71a --- /dev/null +++ b/docs/sqlglot/errors.html @@ -0,0 +1,877 @@ + + + + + + + sqlglot.errors API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.errors

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3import typing as t
+ 4from enum import auto
+ 5
+ 6from sqlglot.helper import AutoName
+ 7
+ 8
+ 9class ErrorLevel(AutoName):
+10    IGNORE = auto()
+11    """Ignore all errors."""
+12
+13    WARN = auto()
+14    """Log all errors."""
+15
+16    RAISE = auto()
+17    """Collect all errors and raise a single exception."""
+18
+19    IMMEDIATE = auto()
+20    """Immediately raise an exception on the first error found."""
+21
+22
+23class SqlglotError(Exception):
+24    pass
+25
+26
+27class UnsupportedError(SqlglotError):
+28    pass
+29
+30
+31class ParseError(SqlglotError):
+32    def __init__(
+33        self,
+34        message: str,
+35        errors: t.Optional[t.List[t.Dict[str, t.Any]]] = None,
+36    ):
+37        super().__init__(message)
+38        self.errors = errors or []
+39
+40    @classmethod
+41    def new(
+42        cls,
+43        message: str,
+44        description: t.Optional[str] = None,
+45        line: t.Optional[int] = None,
+46        col: t.Optional[int] = None,
+47        start_context: t.Optional[str] = None,
+48        highlight: t.Optional[str] = None,
+49        end_context: t.Optional[str] = None,
+50        into_expression: t.Optional[str] = None,
+51    ) -> ParseError:
+52        return cls(
+53            message,
+54            [
+55                {
+56                    "description": description,
+57                    "line": line,
+58                    "col": col,
+59                    "start_context": start_context,
+60                    "highlight": highlight,
+61                    "end_context": end_context,
+62                    "into_expression": into_expression,
+63                }
+64            ],
+65        )
+66
+67
+68class TokenError(SqlglotError):
+69    pass
+70
+71
+72class OptimizeError(SqlglotError):
+73    pass
+74
+75
+76class SchemaError(SqlglotError):
+77    pass
+78
+79
+80class ExecuteError(SqlglotError):
+81    pass
+82
+83
+84def concat_messages(errors: t.Sequence[t.Any], maximum: int) -> str:
+85    msg = [str(e) for e in errors[:maximum]]
+86    remaining = len(errors) - maximum
+87    if remaining > 0:
+88        msg.append(f"... and {remaining} more")
+89    return "\n\n".join(msg)
+90
+91
+92def merge_errors(errors: t.Sequence[ParseError]) -> t.List[t.Dict[str, t.Any]]:
+93    return [e_dict for error in errors for e_dict in error.errors]
+
+ + +
+
+ +
+ + class + ErrorLevel(sqlglot.helper.AutoName): + + + +
+ +
10class ErrorLevel(AutoName):
+11    IGNORE = auto()
+12    """Ignore all errors."""
+13
+14    WARN = auto()
+15    """Log all errors."""
+16
+17    RAISE = auto()
+18    """Collect all errors and raise a single exception."""
+19
+20    IMMEDIATE = auto()
+21    """Immediately raise an exception on the first error found."""
+
+ + +

An enumeration.

+
+ + +
+
+ IGNORE = <ErrorLevel.IGNORE: 'IGNORE'> + + +
+ + +

Ignore all errors.

+
+ + +
+
+
+ WARN = <ErrorLevel.WARN: 'WARN'> + + +
+ + +

Log all errors.

+
+ + +
+
+
+ RAISE = <ErrorLevel.RAISE: 'RAISE'> + + +
+ + +

Collect all errors and raise a single exception.

+
+ + +
+
+
+ IMMEDIATE = <ErrorLevel.IMMEDIATE: 'IMMEDIATE'> + + +
+ + +

Immediately raise an exception on the first error found.

+
+ + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + class + SqlglotError(builtins.Exception): + + + +
+ +
24class SqlglotError(Exception):
+25    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + UnsupportedError(SqlglotError): + + + +
+ +
28class UnsupportedError(SqlglotError):
+29    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + ParseError(SqlglotError): + + + +
+ +
32class ParseError(SqlglotError):
+33    def __init__(
+34        self,
+35        message: str,
+36        errors: t.Optional[t.List[t.Dict[str, t.Any]]] = None,
+37    ):
+38        super().__init__(message)
+39        self.errors = errors or []
+40
+41    @classmethod
+42    def new(
+43        cls,
+44        message: str,
+45        description: t.Optional[str] = None,
+46        line: t.Optional[int] = None,
+47        col: t.Optional[int] = None,
+48        start_context: t.Optional[str] = None,
+49        highlight: t.Optional[str] = None,
+50        end_context: t.Optional[str] = None,
+51        into_expression: t.Optional[str] = None,
+52    ) -> ParseError:
+53        return cls(
+54            message,
+55            [
+56                {
+57                    "description": description,
+58                    "line": line,
+59                    "col": col,
+60                    "start_context": start_context,
+61                    "highlight": highlight,
+62                    "end_context": end_context,
+63                    "into_expression": into_expression,
+64                }
+65            ],
+66        )
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+ +
+ + ParseError(message: str, errors: Optional[List[Dict[str, Any]]] = None) + + + +
+ +
33    def __init__(
+34        self,
+35        message: str,
+36        errors: t.Optional[t.List[t.Dict[str, t.Any]]] = None,
+37    ):
+38        super().__init__(message)
+39        self.errors = errors or []
+
+ + + + +
+
+ +
+
@classmethod
+ + def + new( cls, message: str, description: Optional[str] = None, line: Optional[int] = None, col: Optional[int] = None, start_context: Optional[str] = None, highlight: Optional[str] = None, end_context: Optional[str] = None, into_expression: Optional[str] = None) -> sqlglot.errors.ParseError: + + + +
+ +
41    @classmethod
+42    def new(
+43        cls,
+44        message: str,
+45        description: t.Optional[str] = None,
+46        line: t.Optional[int] = None,
+47        col: t.Optional[int] = None,
+48        start_context: t.Optional[str] = None,
+49        highlight: t.Optional[str] = None,
+50        end_context: t.Optional[str] = None,
+51        into_expression: t.Optional[str] = None,
+52    ) -> ParseError:
+53        return cls(
+54            message,
+55            [
+56                {
+57                    "description": description,
+58                    "line": line,
+59                    "col": col,
+60                    "start_context": start_context,
+61                    "highlight": highlight,
+62                    "end_context": end_context,
+63                    "into_expression": into_expression,
+64                }
+65            ],
+66        )
+
+ + + + +
+
+
Inherited Members
+
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + TokenError(SqlglotError): + + + +
+ +
69class TokenError(SqlglotError):
+70    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + OptimizeError(SqlglotError): + + + +
+ +
73class OptimizeError(SqlglotError):
+74    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + SchemaError(SqlglotError): + + + +
+ +
77class SchemaError(SqlglotError):
+78    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + class + ExecuteError(SqlglotError): + + + +
+ +
81class ExecuteError(SqlglotError):
+82    pass
+
+ + +

Common base class for all non-exit exceptions.

+
+ + +
+
Inherited Members
+
+
builtins.Exception
+
Exception
+ +
+
builtins.BaseException
+
with_traceback
+ +
+
+
+
+
+ +
+ + def + concat_messages(errors: Sequence[Any], maximum: int) -> str: + + + +
+ +
85def concat_messages(errors: t.Sequence[t.Any], maximum: int) -> str:
+86    msg = [str(e) for e in errors[:maximum]]
+87    remaining = len(errors) - maximum
+88    if remaining > 0:
+89        msg.append(f"... and {remaining} more")
+90    return "\n\n".join(msg)
+
+ + + + +
+
+ +
+ + def + merge_errors(errors: Sequence[sqlglot.errors.ParseError]) -> List[Dict[str, Any]]: + + + +
+ +
93def merge_errors(errors: t.Sequence[ParseError]) -> t.List[t.Dict[str, t.Any]]:
+94    return [e_dict for error in errors for e_dict in error.errors]
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/executor.html b/docs/sqlglot/executor.html new file mode 100644 index 0000000..a0cfd40 --- /dev/null +++ b/docs/sqlglot/executor.html @@ -0,0 +1,694 @@ + + + + + + + sqlglot.executor API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

Writing a Python SQL engine from scratch

+ +

Toby Mao

+ +

Introduction

+ +

When I first started writing SQLGlot in early 2021, my goal was just to translate SQL queries from SparkSQL to Presto and vice versa. However, over the last year and a half, I've ended up with a full-fledged SQL engine. SQLGlot can now parse and transpile between 18 SQL dialects and can execute all 24 TPC-H SQL queries. The parser and engine are all written from scratch using Python.

+ +

This post will cover why I went through the effort of creating a Python SQL engine and how a simple query goes from a string to actually transforming data. The following steps are briefly summarized:

+ + + +

Why?

+ +

I started working on SQLGlot because of my work on the experimentation and metrics platform at Netflix, where I built tools that allowed data scientists to define and compute SQL-based metrics. Netflix relied on multiple engines to query data (Spark, Presto, and Druid), so my team built the metrics platform around PyPika, a Python SQL query builder. This way, definitions could be reused across multiple engines. However, it became quickly apparent that writing python code to programmatically generate SQL was challenging for data scientists, especially those with academic backgrounds, since they were mostly familiar with R and SQL. At the time, the only Python SQL parser was sqlparse, which is not actually a parser but a tokenizer, so having users write raw SQL into the platform wasn't really an option. Some time later, I randomly stumbled across Crafting Interpreters and realized that I could use it as a guide towards creating my own SQL parser/transpiler.

+ +

Why did I do this? Isn't a Python SQL engine going to be extremely slow?

+ +

The main reason why I ended up building a SQL engine was...just for entertainment. It's been fun learning about all the things required to actually run a SQL query, and seeing it actually work is extremely rewarding. Before SQLGlot, I had zero experience with lexers, parsers, or compilers.

+ +

In terms of practical use cases, I planned to use the Python SQL engine for unit testing SQL pipelines. Big data pipelines are tough to test because many of the engines are not open source and cannot be run locally. With SQLGlot, you can take a SQL query targeting a warehouse such as Snowflake and seamlessly run it in CI on mock Python data. It's easy to mock data and create arbitrary UDFs because everything is just Python. Although the implementation is slow and unsuitable for large amounts of data (> 1 million rows), there's very little overhead/startup and you can run queries on test data in a couple of milliseconds.

+ +

Finally, the components that have been built to support execution can be used as a foundation for a faster engine. I'm inspired by what Apache Calcite has done for the JVM world. Even though Python is commonly used for data, there hasn't been a Calcite for Python. So, you could say that SQLGlot aims to be that framework. For example, it wouldn't take much work to replace the Python execution engine with numpy/pandas/arrow to become a respectably-performing query engine. The implementation would be able to leverage the parser, optimizer, and logical planner, only needing to implement physical execution. There is a lot of work in the Python ecosystem around high performance vectorized computation, which I think could benefit from a pure Python-based AST/plan. Parsing and planning doesn't have to be fast when the bottleneck of running queries is processing terabytes of data. So, having a Python-based ecosystem around SQL is beneficial given the ease of development in Python, despite not having bare metal performance.

+ +

Parts of SQLGlot's toolkit are being used today by the following:

+ +
    +
  • Ibis: A Python library that provides a lightweight, universal interface for data wrangling. +
      +
    • Uses the Python SQL expression builder and leverages the optimizer/planner to convert SQL into dataframe operations.
    • +
  • +
  • mysql-mimic: Pure-Python implementation of the MySQL server wire protocol +
      +
    • Parses / transforms SQL and executes INFORMATION_SCHEMA queries.
    • +
  • +
  • Quokka: Push-based vectorized query engine +
      +
    • Parse and optimizes SQL.
    • +
  • +
  • Splink: Fast, accurate and scalable probabilistic data linkage using your choice of SQL backend. +
      +
    • Transpiles queries.
    • +
  • +
+ +

How?

+ +

There are many steps involved with actually running a simple query like:

+ +
+
SELECT
+  bar.a,
+  b + 1 AS b
+FROM bar
+JOIN baz
+  ON bar.a = baz.a
+WHERE bar.a > 1
+
+
+ +

In this post, I'll walk through all the steps SQLGlot takes to run this query over Python objects.

+ +

Tokenizing

+ +

The first step is to convert the sql string into a list of tokens. SQLGlot's tokenizer is quite simple and can be found here. In a while loop, it checks each character and either appends the character to the current token, or makes a new token.

+ +

Running the SQLGlot tokenizer shows the output.

+ +

Tokenizer Output

+ +

Each keyword has been converted to a SQLGlot Token object. Each token has some metadata associated with it, like line/column information for error messages. Comments are also a part of the token, so that comments can be preserved.

+ +

Parsing

+ +

Once a SQL statement is tokenized, we don't need to worry about white space and other formatting, so it's easier to work with. We can now convert the list of tokens into an AST. The SQLGlot parser is a handwritten recursive descent parser.

+ +

Similar to the tokenizer, it consumes the tokens sequentially, but it instead uses a recursive algorithm. The tokens are converted into a single AST node that presents the SQL query. The SQLGlot parser was designed to support various dialects, so it contains many options for overriding parsing functionality.

+ +

Parser Output

+ +

The AST is a generic representation of a given SQL query. Each dialect can override or implement its own generator, which can convert an AST object into syntactically-correct SQL.

+ +

Optimizing

+ +

Once we have our AST, we can transform it into an equivalent query that produces the same results more efficiently. When optimizing queries, most engines first convert the AST into a logical plan and then optimize the plan. However, I chose to optimize the AST directly for the following reasons:

+ +
    +
  1. It's easier to debug and validate the optimizations when the input and output are both SQL.

  2. +
  3. Rules can be applied a la carte to transform SQL into a more desirable form.

  4. +
  5. I wanted a way to generate 'canonical sql'. Having a canonical representation of SQL is useful for understanding if two queries are semantically equivalent (e.g. SELECT 1 + 1 and SELECT 2).

  6. +
+ +

I've yet to find another engine that takes this approach, but I'm quite happy with this decision. The optimizer currently does not perform any "physical optimizations" such as join reordering. Those are left to the execution layer, as additional statistics and information could become relevant.

+ +

Optimizer Output

+ +

The optimizer currently has 17 rules. Each of these rules is applied, transforming the AST in place. The combination of these rules creates "canonical" sql that can then be more easily converted into a logical plan and executed.

+ +

Some example rules are:

+ +

qualify_tables and qualify_columns

+ +
    +
  • Adds all db/catalog qualifiers to tables and forces an alias.
  • +
  • Ensure each column is unambiguous and expand stars.
  • +
+ +
+
SELECT * FROM x;
+
+SELECT "db"."x" AS "x";
+
+
+ +

simplify

+ +

Boolean and math simplification. Check out all the test cases.

+ +
+
((NOT FALSE) AND (x = x)) AND (TRUE OR 1 <> 3);
+x = x;
+
+1 + 1;
+2;
+
+
+ +

normalize

+ +

Attempts to convert all predicates into conjunctive normal form.

+ +
+
-- DNF
+(A AND B) OR (B AND C AND D);
+
+-- CNF
+(A OR C) AND (A OR D) AND B;
+
+
+ +

unnest_subqueries

+ +

Converts subqueries in predicates into joins.

+ +
+
-- The subquery can be converted into a left join
+SELECT *
+FROM x AS x
+WHERE (
+  SELECT y.a AS a
+  FROM y AS y
+  WHERE x.a = y.a
+) = 1;
+
+SELECT *
+FROM x AS x
+LEFT JOIN (
+  SELECT y.a AS a
+  FROM y AS y
+  WHERE TRUE
+  GROUP BY y.a
+) AS "_u_0"
+  ON x.a = "_u_0".a
+WHERE ("_u_0".a = 1 AND NOT "_u_0".a IS NULL)
+
+
+ +

pushdown_predicates

+ +

Push down filters into the innermost query.

+ +
+
SELECT *
+FROM (
+  SELECT *
+  FROM x AS x
+) AS y
+WHERE y.a = 1;
+
+SELECT *
+FROM (
+  SELECT *
+  FROM x AS x
+  WHERE y.a = 1
+) AS y WHERE TRUE
+
+
+ +

annotate_types

+ +

Infer all types throughout the AST given schema information and function type definitions.

+ +

Planning

+ +

After the SQL AST has been "optimized", it's much easier to convert into a logical plan. The AST is traversed and converted into a DAG consisting of one of five steps. The different steps are:

+ +

Scan

+ +

Selects columns from a table, applies projections, and finally filters the table.

+ +

Sort

+ +

Sorts a table for order by expressions.

+ +

Set

+ +

Applies the operators union/union all/except/intersect.

+ +

Aggregate

+ +

Applies an aggregation/group by.

+ +

Join

+ +

Joins multiple tables together.

+ +

Planner Output

+ +

The logical plan is quite simple and contains the information required to convert it into a physical plan (execution).

+ +

Executing

+ +

Finally, we can actually execute the SQL query. The Python engine is not fast, but it's very small (~400 LOC)! It iterates the DAG with a queue and runs each step, passing each intermediary table to the next step.

+ +

In order to keep things simple, it evaluates expressions with eval. Because SQLGlot was built primarily to be a transpiler, it was simple to create a "Python SQL" dialect. So a SQL expression x + 1 can just be converted into scope['x'] + 1.

+ +

Executor Output

+ +

What's next

+ +

SQLGlot's main focus will always be on parsing/transpiling, but I plan to continue development on the execution engine. I'd like to pass TPC-DS. If someone doesn't beat me to it, I may even take a stab at writing a Pandas/Arrow execution engine.

+ +

I'm hoping that over time, SQLGlot will spark the Python SQL ecosystem just like Calcite has for Java.

+ +

Special thanks

+ +

SQLGlot would not be what it is without its core contributors. In particular, the execution engine would not exist without Barak Alon and George Sittas.

+ +

Get in touch

+ +

If you'd like to chat more about SQLGlot, please join my Slack Channel!

+ +
+
+ + + + + +
 1"""
+ 2.. include:: ../../posts/python_sql_engine.md
+ 3
+ 4----
+ 5"""
+ 6
+ 7from __future__ import annotations
+ 8
+ 9import logging
+10import time
+11import typing as t
+12
+13from sqlglot import maybe_parse
+14from sqlglot.errors import ExecuteError
+15from sqlglot.executor.python import PythonExecutor
+16from sqlglot.executor.table import Table, ensure_tables
+17from sqlglot.optimizer import optimize
+18from sqlglot.planner import Plan
+19from sqlglot.schema import ensure_schema
+20
+21logger = logging.getLogger("sqlglot")
+22
+23if t.TYPE_CHECKING:
+24    from sqlglot.dialects.dialect import DialectType
+25    from sqlglot.executor.table import Tables
+26    from sqlglot.expressions import Expression
+27    from sqlglot.schema import Schema
+28
+29
+30def execute(
+31    sql: str | Expression,
+32    schema: t.Optional[t.Dict | Schema] = None,
+33    read: DialectType = None,
+34    tables: t.Optional[t.Dict] = None,
+35) -> Table:
+36    """
+37    Run a sql query against data.
+38
+39    Args:
+40        sql: a sql statement.
+41        schema: database schema.
+42            This can either be an instance of `Schema` or a mapping in one of the following forms:
+43            1. {table: {col: type}}
+44            2. {db: {table: {col: type}}}
+45            3. {catalog: {db: {table: {col: type}}}}
+46        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+47        tables: additional tables to register.
+48
+49    Returns:
+50        Simple columnar data structure.
+51    """
+52    tables_ = ensure_tables(tables)
+53
+54    if not schema:
+55        schema = {
+56            name: {column: type(table[0][column]).__name__ for column in table.columns}
+57            for name, table in tables_.mapping.items()
+58        }
+59
+60    schema = ensure_schema(schema)
+61
+62    if tables_.supported_table_args and tables_.supported_table_args != schema.supported_table_args:
+63        raise ExecuteError("Tables must support the same table args as schema")
+64
+65    expression = maybe_parse(sql, dialect=read)
+66
+67    now = time.time()
+68    expression = optimize(expression, schema, leave_tables_isolated=True)
+69
+70    logger.debug("Optimization finished: %f", time.time() - now)
+71    logger.debug("Optimized SQL: %s", expression.sql(pretty=True))
+72
+73    plan = Plan(expression)
+74
+75    logger.debug("Logical Plan: %s", plan)
+76
+77    now = time.time()
+78    result = PythonExecutor(tables=tables_).execute(plan)
+79
+80    logger.debug("Query finished: %f", time.time() - now)
+81
+82    return result
+
+ + +
+
+ +
+ + def + execute( sql: str | sqlglot.expressions.Expression, schema: Union[Dict, sqlglot.schema.Schema, NoneType] = None, read: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, tables: Optional[Dict] = None) -> sqlglot.executor.table.Table: + + + +
+ +
31def execute(
+32    sql: str | Expression,
+33    schema: t.Optional[t.Dict | Schema] = None,
+34    read: DialectType = None,
+35    tables: t.Optional[t.Dict] = None,
+36) -> Table:
+37    """
+38    Run a sql query against data.
+39
+40    Args:
+41        sql: a sql statement.
+42        schema: database schema.
+43            This can either be an instance of `Schema` or a mapping in one of the following forms:
+44            1. {table: {col: type}}
+45            2. {db: {table: {col: type}}}
+46            3. {catalog: {db: {table: {col: type}}}}
+47        read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
+48        tables: additional tables to register.
+49
+50    Returns:
+51        Simple columnar data structure.
+52    """
+53    tables_ = ensure_tables(tables)
+54
+55    if not schema:
+56        schema = {
+57            name: {column: type(table[0][column]).__name__ for column in table.columns}
+58            for name, table in tables_.mapping.items()
+59        }
+60
+61    schema = ensure_schema(schema)
+62
+63    if tables_.supported_table_args and tables_.supported_table_args != schema.supported_table_args:
+64        raise ExecuteError("Tables must support the same table args as schema")
+65
+66    expression = maybe_parse(sql, dialect=read)
+67
+68    now = time.time()
+69    expression = optimize(expression, schema, leave_tables_isolated=True)
+70
+71    logger.debug("Optimization finished: %f", time.time() - now)
+72    logger.debug("Optimized SQL: %s", expression.sql(pretty=True))
+73
+74    plan = Plan(expression)
+75
+76    logger.debug("Logical Plan: %s", plan)
+77
+78    now = time.time()
+79    result = PythonExecutor(tables=tables_).execute(plan)
+80
+81    logger.debug("Query finished: %f", time.time() - now)
+82
+83    return result
+
+ + +

Run a sql query against data.

+ +
Arguments:
+ +
    +
  • sql: a sql statement.
  • +
  • schema: database schema. +This can either be an instance of Schema or a mapping in one of the following forms: +
      +
    1. {table: {col: type}}
    2. +
    3. {db: {table: {col: type}}}
    4. +
    5. {catalog: {db: {table: {col: type}}}}
    6. +
  • +
  • read: the SQL dialect to apply during parsing (eg. "spark", "hive", "presto", "mysql").
  • +
  • tables: additional tables to register.
  • +
+ +
Returns:
+ +
+

Simple columnar data structure.

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/executor/context.html b/docs/sqlglot/executor/context.html new file mode 100644 index 0000000..da6c629 --- /dev/null +++ b/docs/sqlglot/executor/context.html @@ -0,0 +1,715 @@ + + + + + + + sqlglot.executor.context API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.executor.context

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4
+  5from sqlglot.executor.env import ENV
+  6
+  7if t.TYPE_CHECKING:
+  8    from sqlglot.executor.table import Table, TableIter
+  9
+ 10
+ 11class Context:
+ 12    """
+ 13    Execution context for sql expressions.
+ 14
+ 15    Context is used to hold relevant data tables which can then be queried on with eval.
+ 16
+ 17    References to columns can either be scalar or vectors. When set_row is used, column references
+ 18    evaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient
+ 19    evaluation of aggregation functions.
+ 20    """
+ 21
+ 22    def __init__(self, tables: t.Dict[str, Table], env: t.Optional[t.Dict] = None) -> None:
+ 23        """
+ 24        Args
+ 25            tables: representing the scope of the current execution context.
+ 26            env: dictionary of functions within the execution context.
+ 27        """
+ 28        self.tables = tables
+ 29        self._table: t.Optional[Table] = None
+ 30        self.range_readers = {name: table.range_reader for name, table in self.tables.items()}
+ 31        self.row_readers = {name: table.reader for name, table in tables.items()}
+ 32        self.env = {**ENV, **(env or {}), "scope": self.row_readers}
+ 33
+ 34    def eval(self, code):
+ 35        return eval(code, self.env)
+ 36
+ 37    def eval_tuple(self, codes):
+ 38        return tuple(self.eval(code) for code in codes)
+ 39
+ 40    @property
+ 41    def table(self) -> Table:
+ 42        if self._table is None:
+ 43            self._table = list(self.tables.values())[0]
+ 44            for other in self.tables.values():
+ 45                if self._table.columns != other.columns:
+ 46                    raise Exception(f"Columns are different.")
+ 47                if len(self._table.rows) != len(other.rows):
+ 48                    raise Exception(f"Rows are different.")
+ 49        return self._table
+ 50
+ 51    def add_columns(self, *columns: str) -> None:
+ 52        for table in self.tables.values():
+ 53            table.add_columns(*columns)
+ 54
+ 55    @property
+ 56    def columns(self) -> t.Tuple:
+ 57        return self.table.columns
+ 58
+ 59    def __iter__(self):
+ 60        self.env["scope"] = self.row_readers
+ 61        for i in range(len(self.table.rows)):
+ 62            for table in self.tables.values():
+ 63                reader = table[i]
+ 64            yield reader, self
+ 65
+ 66    def table_iter(self, table: str) -> t.Iterator[t.Tuple[TableIter, Context]]:
+ 67        self.env["scope"] = self.row_readers
+ 68
+ 69        for reader in self.tables[table]:
+ 70            yield reader, self
+ 71
+ 72    def filter(self, condition) -> None:
+ 73        rows = [reader.row for reader, _ in self if self.eval(condition)]
+ 74
+ 75        for table in self.tables.values():
+ 76            table.rows = rows
+ 77
+ 78    def sort(self, key) -> None:
+ 79        def sort_key(row: t.Tuple) -> t.Tuple:
+ 80            self.set_row(row)
+ 81            return self.eval_tuple(key)
+ 82
+ 83        self.table.rows.sort(key=sort_key)
+ 84
+ 85    def set_row(self, row: t.Tuple) -> None:
+ 86        for table in self.tables.values():
+ 87            table.reader.row = row
+ 88        self.env["scope"] = self.row_readers
+ 89
+ 90    def set_index(self, index: int) -> None:
+ 91        for table in self.tables.values():
+ 92            table[index]
+ 93        self.env["scope"] = self.row_readers
+ 94
+ 95    def set_range(self, start: int, end: int) -> None:
+ 96        for name in self.tables:
+ 97            self.range_readers[name].range = range(start, end)
+ 98        self.env["scope"] = self.range_readers
+ 99
+100    def __contains__(self, table: str) -> bool:
+101        return table in self.tables
+
+ + +
+
+ +
+ + class + Context: + + + +
+ +
 12class Context:
+ 13    """
+ 14    Execution context for sql expressions.
+ 15
+ 16    Context is used to hold relevant data tables which can then be queried on with eval.
+ 17
+ 18    References to columns can either be scalar or vectors. When set_row is used, column references
+ 19    evaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient
+ 20    evaluation of aggregation functions.
+ 21    """
+ 22
+ 23    def __init__(self, tables: t.Dict[str, Table], env: t.Optional[t.Dict] = None) -> None:
+ 24        """
+ 25        Args
+ 26            tables: representing the scope of the current execution context.
+ 27            env: dictionary of functions within the execution context.
+ 28        """
+ 29        self.tables = tables
+ 30        self._table: t.Optional[Table] = None
+ 31        self.range_readers = {name: table.range_reader for name, table in self.tables.items()}
+ 32        self.row_readers = {name: table.reader for name, table in tables.items()}
+ 33        self.env = {**ENV, **(env or {}), "scope": self.row_readers}
+ 34
+ 35    def eval(self, code):
+ 36        return eval(code, self.env)
+ 37
+ 38    def eval_tuple(self, codes):
+ 39        return tuple(self.eval(code) for code in codes)
+ 40
+ 41    @property
+ 42    def table(self) -> Table:
+ 43        if self._table is None:
+ 44            self._table = list(self.tables.values())[0]
+ 45            for other in self.tables.values():
+ 46                if self._table.columns != other.columns:
+ 47                    raise Exception(f"Columns are different.")
+ 48                if len(self._table.rows) != len(other.rows):
+ 49                    raise Exception(f"Rows are different.")
+ 50        return self._table
+ 51
+ 52    def add_columns(self, *columns: str) -> None:
+ 53        for table in self.tables.values():
+ 54            table.add_columns(*columns)
+ 55
+ 56    @property
+ 57    def columns(self) -> t.Tuple:
+ 58        return self.table.columns
+ 59
+ 60    def __iter__(self):
+ 61        self.env["scope"] = self.row_readers
+ 62        for i in range(len(self.table.rows)):
+ 63            for table in self.tables.values():
+ 64                reader = table[i]
+ 65            yield reader, self
+ 66
+ 67    def table_iter(self, table: str) -> t.Iterator[t.Tuple[TableIter, Context]]:
+ 68        self.env["scope"] = self.row_readers
+ 69
+ 70        for reader in self.tables[table]:
+ 71            yield reader, self
+ 72
+ 73    def filter(self, condition) -> None:
+ 74        rows = [reader.row for reader, _ in self if self.eval(condition)]
+ 75
+ 76        for table in self.tables.values():
+ 77            table.rows = rows
+ 78
+ 79    def sort(self, key) -> None:
+ 80        def sort_key(row: t.Tuple) -> t.Tuple:
+ 81            self.set_row(row)
+ 82            return self.eval_tuple(key)
+ 83
+ 84        self.table.rows.sort(key=sort_key)
+ 85
+ 86    def set_row(self, row: t.Tuple) -> None:
+ 87        for table in self.tables.values():
+ 88            table.reader.row = row
+ 89        self.env["scope"] = self.row_readers
+ 90
+ 91    def set_index(self, index: int) -> None:
+ 92        for table in self.tables.values():
+ 93            table[index]
+ 94        self.env["scope"] = self.row_readers
+ 95
+ 96    def set_range(self, start: int, end: int) -> None:
+ 97        for name in self.tables:
+ 98            self.range_readers[name].range = range(start, end)
+ 99        self.env["scope"] = self.range_readers
+100
+101    def __contains__(self, table: str) -> bool:
+102        return table in self.tables
+
+ + +

Execution context for sql expressions.

+ +

Context is used to hold relevant data tables which can then be queried on with eval.

+ +

References to columns can either be scalar or vectors. When set_row is used, column references +evaluate to scalars while set_range evaluates to vectors. This allows convenient and efficient +evaluation of aggregation functions.

+
+ + +
+ +
+ + Context( tables: Dict[str, sqlglot.executor.table.Table], env: Optional[Dict] = None) + + + +
+ +
23    def __init__(self, tables: t.Dict[str, Table], env: t.Optional[t.Dict] = None) -> None:
+24        """
+25        Args
+26            tables: representing the scope of the current execution context.
+27            env: dictionary of functions within the execution context.
+28        """
+29        self.tables = tables
+30        self._table: t.Optional[Table] = None
+31        self.range_readers = {name: table.range_reader for name, table in self.tables.items()}
+32        self.row_readers = {name: table.reader for name, table in tables.items()}
+33        self.env = {**ENV, **(env or {}), "scope": self.row_readers}
+
+ + +

Args + tables: representing the scope of the current execution context. + env: dictionary of functions within the execution context.

+
+ + +
+
+ +
+ + def + eval(self, code): + + + +
+ +
35    def eval(self, code):
+36        return eval(code, self.env)
+
+ + + + +
+
+ +
+ + def + eval_tuple(self, codes): + + + +
+ +
38    def eval_tuple(self, codes):
+39        return tuple(self.eval(code) for code in codes)
+
+ + + + +
+
+ +
+ + def + add_columns(self, *columns: str) -> None: + + + +
+ +
52    def add_columns(self, *columns: str) -> None:
+53        for table in self.tables.values():
+54            table.add_columns(*columns)
+
+ + + + +
+
+ +
+ + def + table_iter( self, table: str) -> Iterator[Tuple[sqlglot.executor.table.TableIter, sqlglot.executor.context.Context]]: + + + +
+ +
67    def table_iter(self, table: str) -> t.Iterator[t.Tuple[TableIter, Context]]:
+68        self.env["scope"] = self.row_readers
+69
+70        for reader in self.tables[table]:
+71            yield reader, self
+
+ + + + +
+
+ +
+ + def + filter(self, condition) -> None: + + + +
+ +
73    def filter(self, condition) -> None:
+74        rows = [reader.row for reader, _ in self if self.eval(condition)]
+75
+76        for table in self.tables.values():
+77            table.rows = rows
+
+ + + + +
+
+ +
+ + def + sort(self, key) -> None: + + + +
+ +
79    def sort(self, key) -> None:
+80        def sort_key(row: t.Tuple) -> t.Tuple:
+81            self.set_row(row)
+82            return self.eval_tuple(key)
+83
+84        self.table.rows.sort(key=sort_key)
+
+ + + + +
+
+ +
+ + def + set_row(self, row: Tuple) -> None: + + + +
+ +
86    def set_row(self, row: t.Tuple) -> None:
+87        for table in self.tables.values():
+88            table.reader.row = row
+89        self.env["scope"] = self.row_readers
+
+ + + + +
+
+ +
+ + def + set_index(self, index: int) -> None: + + + +
+ +
91    def set_index(self, index: int) -> None:
+92        for table in self.tables.values():
+93            table[index]
+94        self.env["scope"] = self.row_readers
+
+ + + + +
+
+ +
+ + def + set_range(self, start: int, end: int) -> None: + + + +
+ +
96    def set_range(self, start: int, end: int) -> None:
+97        for name in self.tables:
+98            self.range_readers[name].range = range(start, end)
+99        self.env["scope"] = self.range_readers
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/executor/env.html b/docs/sqlglot/executor/env.html new file mode 100644 index 0000000..f221871 --- /dev/null +++ b/docs/sqlglot/executor/env.html @@ -0,0 +1,717 @@ + + + + + + + sqlglot.executor.env API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.executor.env

+ + + + + + +
  1import datetime
+  2import inspect
+  3import re
+  4import statistics
+  5from functools import wraps
+  6
+  7from sqlglot import exp
+  8from sqlglot.helper import PYTHON_VERSION
+  9
+ 10
+ 11class reverse_key:
+ 12    def __init__(self, obj):
+ 13        self.obj = obj
+ 14
+ 15    def __eq__(self, other):
+ 16        return other.obj == self.obj
+ 17
+ 18    def __lt__(self, other):
+ 19        return other.obj < self.obj
+ 20
+ 21
+ 22def filter_nulls(func, empty_null=True):
+ 23    @wraps(func)
+ 24    def _func(values):
+ 25        filtered = tuple(v for v in values if v is not None)
+ 26        if not filtered and empty_null:
+ 27            return None
+ 28        return func(filtered)
+ 29
+ 30    return _func
+ 31
+ 32
+ 33def null_if_any(*required):
+ 34    """
+ 35    Decorator that makes a function return `None` if any of the `required` arguments are `None`.
+ 36
+ 37    This also supports decoration with no arguments, e.g.:
+ 38
+ 39        @null_if_any
+ 40        def foo(a, b): ...
+ 41
+ 42    In which case all arguments are required.
+ 43    """
+ 44    f = None
+ 45    if len(required) == 1 and callable(required[0]):
+ 46        f = required[0]
+ 47        required = ()
+ 48
+ 49    def decorator(func):
+ 50        if required:
+ 51            required_indices = [
+ 52                i for i, param in enumerate(inspect.signature(func).parameters) if param in required
+ 53            ]
+ 54
+ 55            def predicate(*args):
+ 56                return any(args[i] is None for i in required_indices)
+ 57
+ 58        else:
+ 59
+ 60            def predicate(*args):
+ 61                return any(a is None for a in args)
+ 62
+ 63        @wraps(func)
+ 64        def _func(*args):
+ 65            if predicate(*args):
+ 66                return None
+ 67            return func(*args)
+ 68
+ 69        return _func
+ 70
+ 71    if f:
+ 72        return decorator(f)
+ 73
+ 74    return decorator
+ 75
+ 76
+ 77@null_if_any("substr", "this")
+ 78def str_position(substr, this, position=None):
+ 79    position = position - 1 if position is not None else position
+ 80    return this.find(substr, position) + 1
+ 81
+ 82
+ 83@null_if_any("this")
+ 84def substring(this, start=None, length=None):
+ 85    if start is None:
+ 86        return this
+ 87    elif start == 0:
+ 88        return ""
+ 89    elif start < 0:
+ 90        start = len(this) + start
+ 91    else:
+ 92        start -= 1
+ 93
+ 94    end = None if length is None else start + length
+ 95
+ 96    return this[start:end]
+ 97
+ 98
+ 99@null_if_any
+100def cast(this, to):
+101    if to == exp.DataType.Type.DATE:
+102        return datetime.date.fromisoformat(this)
+103    if to == exp.DataType.Type.DATETIME:
+104        return datetime.datetime.fromisoformat(this)
+105    if to in exp.DataType.TEXT_TYPES:
+106        return str(this)
+107    if to in {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE}:
+108        return float(this)
+109    if to in exp.DataType.NUMERIC_TYPES:
+110        return int(this)
+111    raise NotImplementedError(f"Casting to '{to}' not implemented.")
+112
+113
+114def ordered(this, desc, nulls_first):
+115    if desc:
+116        return reverse_key(this)
+117    return this
+118
+119
+120@null_if_any
+121def interval(this, unit):
+122    if unit == "DAY":
+123        return datetime.timedelta(days=float(this))
+124    raise NotImplementedError
+125
+126
+127ENV = {
+128    "exp": exp,
+129    # aggs
+130    "ARRAYAGG": list,
+131    "AVG": filter_nulls(statistics.fmean if PYTHON_VERSION >= (3, 8) else statistics.mean),  # type: ignore
+132    "COUNT": filter_nulls(lambda acc: sum(1 for _ in acc), False),
+133    "MAX": filter_nulls(max),
+134    "MIN": filter_nulls(min),
+135    "SUM": filter_nulls(sum),
+136    # scalar functions
+137    "ABS": null_if_any(lambda this: abs(this)),
+138    "ADD": null_if_any(lambda e, this: e + this),
+139    "ARRAYANY": null_if_any(lambda arr, func: any(func(e) for e in arr)),
+140    "BETWEEN": null_if_any(lambda this, low, high: low <= this and this <= high),
+141    "BITWISEAND": null_if_any(lambda this, e: this & e),
+142    "BITWISELEFTSHIFT": null_if_any(lambda this, e: this << e),
+143    "BITWISEOR": null_if_any(lambda this, e: this | e),
+144    "BITWISERIGHTSHIFT": null_if_any(lambda this, e: this >> e),
+145    "BITWISEXOR": null_if_any(lambda this, e: this ^ e),
+146    "CAST": cast,
+147    "COALESCE": lambda *args: next((a for a in args if a is not None), None),
+148    "CONCAT": null_if_any(lambda *args: "".join(args)),
+149    "CONCATWS": null_if_any(lambda this, *args: this.join(args)),
+150    "DIV": null_if_any(lambda e, this: e / this),
+151    "EQ": null_if_any(lambda this, e: this == e),
+152    "EXTRACT": null_if_any(lambda this, e: getattr(e, this)),
+153    "GT": null_if_any(lambda this, e: this > e),
+154    "GTE": null_if_any(lambda this, e: this >= e),
+155    "IFNULL": lambda e, alt: alt if e is None else e,
+156    "IF": lambda predicate, true, false: true if predicate else false,
+157    "INTDIV": null_if_any(lambda e, this: e // this),
+158    "INTERVAL": interval,
+159    "LIKE": null_if_any(
+160        lambda this, e: bool(re.match(e.replace("_", ".").replace("%", ".*"), this))
+161    ),
+162    "LOWER": null_if_any(lambda arg: arg.lower()),
+163    "LT": null_if_any(lambda this, e: this < e),
+164    "LTE": null_if_any(lambda this, e: this <= e),
+165    "MOD": null_if_any(lambda e, this: e % this),
+166    "MUL": null_if_any(lambda e, this: e * this),
+167    "NEQ": null_if_any(lambda this, e: this != e),
+168    "ORD": null_if_any(ord),
+169    "ORDERED": ordered,
+170    "POW": pow,
+171    "STRPOSITION": str_position,
+172    "SUB": null_if_any(lambda e, this: e - this),
+173    "SUBSTRING": substring,
+174    "TIMESTRTOTIME": null_if_any(lambda arg: datetime.datetime.fromisoformat(arg)),
+175    "UPPER": null_if_any(lambda arg: arg.upper()),
+176}
+
+ + +
+
+ +
+ + class + reverse_key: + + + +
+ +
12class reverse_key:
+13    def __init__(self, obj):
+14        self.obj = obj
+15
+16    def __eq__(self, other):
+17        return other.obj == self.obj
+18
+19    def __lt__(self, other):
+20        return other.obj < self.obj
+
+ + + + +
+ +
+ + reverse_key(obj) + + + +
+ +
13    def __init__(self, obj):
+14        self.obj = obj
+
+ + + + +
+
+
+ +
+ + def + filter_nulls(func, empty_null=True): + + + +
+ +
23def filter_nulls(func, empty_null=True):
+24    @wraps(func)
+25    def _func(values):
+26        filtered = tuple(v for v in values if v is not None)
+27        if not filtered and empty_null:
+28            return None
+29        return func(filtered)
+30
+31    return _func
+
+ + + + +
+
+ +
+ + def + null_if_any(*required): + + + +
+ +
34def null_if_any(*required):
+35    """
+36    Decorator that makes a function return `None` if any of the `required` arguments are `None`.
+37
+38    This also supports decoration with no arguments, e.g.:
+39
+40        @null_if_any
+41        def foo(a, b): ...
+42
+43    In which case all arguments are required.
+44    """
+45    f = None
+46    if len(required) == 1 and callable(required[0]):
+47        f = required[0]
+48        required = ()
+49
+50    def decorator(func):
+51        if required:
+52            required_indices = [
+53                i for i, param in enumerate(inspect.signature(func).parameters) if param in required
+54            ]
+55
+56            def predicate(*args):
+57                return any(args[i] is None for i in required_indices)
+58
+59        else:
+60
+61            def predicate(*args):
+62                return any(a is None for a in args)
+63
+64        @wraps(func)
+65        def _func(*args):
+66            if predicate(*args):
+67                return None
+68            return func(*args)
+69
+70        return _func
+71
+72    if f:
+73        return decorator(f)
+74
+75    return decorator
+
+ + +

Decorator that makes a function return None if any of the required arguments are None.

+ +

This also supports decoration with no arguments, e.g.:

+ +
@null_if_any
+def foo(a, b): ...
+
+ +

In which case all arguments are required.

+
+ + +
+
+ +
+
@null_if_any('substr', 'this')
+ + def + str_position(substr, this, position=None): + + + +
+ +
78@null_if_any("substr", "this")
+79def str_position(substr, this, position=None):
+80    position = position - 1 if position is not None else position
+81    return this.find(substr, position) + 1
+
+ + + + +
+
+ +
+
@null_if_any('this')
+ + def + substring(this, start=None, length=None): + + + +
+ +
84@null_if_any("this")
+85def substring(this, start=None, length=None):
+86    if start is None:
+87        return this
+88    elif start == 0:
+89        return ""
+90    elif start < 0:
+91        start = len(this) + start
+92    else:
+93        start -= 1
+94
+95    end = None if length is None else start + length
+96
+97    return this[start:end]
+
+ + + + +
+
+ +
+
@null_if_any
+ + def + cast(this, to): + + + +
+ +
100@null_if_any
+101def cast(this, to):
+102    if to == exp.DataType.Type.DATE:
+103        return datetime.date.fromisoformat(this)
+104    if to == exp.DataType.Type.DATETIME:
+105        return datetime.datetime.fromisoformat(this)
+106    if to in exp.DataType.TEXT_TYPES:
+107        return str(this)
+108    if to in {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE}:
+109        return float(this)
+110    if to in exp.DataType.NUMERIC_TYPES:
+111        return int(this)
+112    raise NotImplementedError(f"Casting to '{to}' not implemented.")
+
+ + + + +
+
+ +
+ + def + ordered(this, desc, nulls_first): + + + +
+ +
115def ordered(this, desc, nulls_first):
+116    if desc:
+117        return reverse_key(this)
+118    return this
+
+ + + + +
+
+ +
+
@null_if_any
+ + def + interval(this, unit): + + + +
+ +
121@null_if_any
+122def interval(this, unit):
+123    if unit == "DAY":
+124        return datetime.timedelta(days=float(this))
+125    raise NotImplementedError
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/executor/python.html b/docs/sqlglot/executor/python.html new file mode 100644 index 0000000..c035943 --- /dev/null +++ b/docs/sqlglot/executor/python.html @@ -0,0 +1,2130 @@ + + + + + + + sqlglot.executor.python API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.executor.python

+ + + + + + +
  1import ast
+  2import collections
+  3import itertools
+  4import math
+  5
+  6from sqlglot import exp, generator, planner, tokens
+  7from sqlglot.dialects.dialect import Dialect, inline_array_sql
+  8from sqlglot.errors import ExecuteError
+  9from sqlglot.executor.context import Context
+ 10from sqlglot.executor.env import ENV
+ 11from sqlglot.executor.table import RowReader, Table
+ 12from sqlglot.helper import csv_reader, subclasses
+ 13
+ 14
+ 15class PythonExecutor:
+ 16    def __init__(self, env=None, tables=None):
+ 17        self.generator = Python().generator(identify=True, comments=False)
+ 18        self.env = {**ENV, **(env or {})}
+ 19        self.tables = tables or {}
+ 20
+ 21    def execute(self, plan):
+ 22        running = set()
+ 23        finished = set()
+ 24        queue = set(plan.leaves)
+ 25        contexts = {}
+ 26
+ 27        while queue:
+ 28            node = queue.pop()
+ 29            try:
+ 30                context = self.context(
+ 31                    {
+ 32                        name: table
+ 33                        for dep in node.dependencies
+ 34                        for name, table in contexts[dep].tables.items()
+ 35                    }
+ 36                )
+ 37                running.add(node)
+ 38
+ 39                if isinstance(node, planner.Scan):
+ 40                    contexts[node] = self.scan(node, context)
+ 41                elif isinstance(node, planner.Aggregate):
+ 42                    contexts[node] = self.aggregate(node, context)
+ 43                elif isinstance(node, planner.Join):
+ 44                    contexts[node] = self.join(node, context)
+ 45                elif isinstance(node, planner.Sort):
+ 46                    contexts[node] = self.sort(node, context)
+ 47                elif isinstance(node, planner.SetOperation):
+ 48                    contexts[node] = self.set_operation(node, context)
+ 49                else:
+ 50                    raise NotImplementedError
+ 51
+ 52                running.remove(node)
+ 53                finished.add(node)
+ 54
+ 55                for dep in node.dependents:
+ 56                    if dep not in running and all(d in contexts for d in dep.dependencies):
+ 57                        queue.add(dep)
+ 58
+ 59                for dep in node.dependencies:
+ 60                    if all(d in finished for d in dep.dependents):
+ 61                        contexts.pop(dep)
+ 62            except Exception as e:
+ 63                raise ExecuteError(f"Step '{node.id}' failed: {e}") from e
+ 64
+ 65        root = plan.root
+ 66        return contexts[root].tables[root.name]
+ 67
+ 68    def generate(self, expression):
+ 69        """Convert a SQL expression into literal Python code and compile it into bytecode."""
+ 70        if not expression:
+ 71            return None
+ 72
+ 73        sql = self.generator.generate(expression)
+ 74        return compile(sql, sql, "eval", optimize=2)
+ 75
+ 76    def generate_tuple(self, expressions):
+ 77        """Convert an array of SQL expressions into tuple of Python byte code."""
+ 78        if not expressions:
+ 79            return tuple()
+ 80        return tuple(self.generate(expression) for expression in expressions)
+ 81
+ 82    def context(self, tables):
+ 83        return Context(tables, env=self.env)
+ 84
+ 85    def table(self, expressions):
+ 86        return Table(
+ 87            expression.alias_or_name if isinstance(expression, exp.Expression) else expression
+ 88            for expression in expressions
+ 89        )
+ 90
+ 91    def scan(self, step, context):
+ 92        source = step.source
+ 93
+ 94        if source and isinstance(source, exp.Expression):
+ 95            source = source.name or source.alias
+ 96
+ 97        condition = self.generate(step.condition)
+ 98        projections = self.generate_tuple(step.projections)
+ 99
+100        if source is None:
+101            context, table_iter = self.static()
+102        elif source in context:
+103            if not projections and not condition:
+104                return self.context({step.name: context.tables[source]})
+105            table_iter = context.table_iter(source)
+106        elif isinstance(step.source, exp.Table) and isinstance(step.source.this, exp.ReadCSV):
+107            table_iter = self.scan_csv(step)
+108            context = next(table_iter)
+109        else:
+110            context, table_iter = self.scan_table(step)
+111
+112        if projections:
+113            sink = self.table(step.projections)
+114        else:
+115            sink = self.table(context.columns)
+116
+117        for reader in table_iter:
+118            if len(sink) >= step.limit:
+119                break
+120
+121            if condition and not context.eval(condition):
+122                continue
+123
+124            if projections:
+125                sink.append(context.eval_tuple(projections))
+126            else:
+127                sink.append(reader.row)
+128
+129        return self.context({step.name: sink})
+130
+131    def static(self):
+132        return self.context({}), [RowReader(())]
+133
+134    def scan_table(self, step):
+135        table = self.tables.find(step.source)
+136        context = self.context({step.source.alias_or_name: table})
+137        return context, iter(table)
+138
+139    def scan_csv(self, step):
+140        alias = step.source.alias
+141        source = step.source.this
+142
+143        with csv_reader(source) as reader:
+144            columns = next(reader)
+145            table = Table(columns)
+146            context = self.context({alias: table})
+147            yield context
+148            types = []
+149
+150            for row in reader:
+151                if not types:
+152                    for v in row:
+153                        try:
+154                            types.append(type(ast.literal_eval(v)))
+155                        except (ValueError, SyntaxError):
+156                            types.append(str)
+157                context.set_row(tuple(t(v) for t, v in zip(types, row)))
+158                yield context.table.reader
+159
    def join(self, step, context):
        """Execute a planner.Join step: fold each joined table into the
        source context, then apply the step's condition/projections/limit."""
        source = step.name

        source_table = context.tables[source]
        source_context = self.context({source: source_table})
        # Track which slice of the combined row belongs to each table name.
        column_ranges = {source: range(0, len(source_table.columns))}

        for name, join in step.joins.items():
            table = context.tables[name]
            start = max(r.stop for r in column_ranges.values())
            column_ranges[name] = range(start, len(table.columns) + start)
            join_context = self.context({name: table})

            # A "source_key" marks an equi-join, which can be hashed;
            # otherwise fall back to the O(n*m) nested loop.
            if join.get("source_key"):
                table = self.hash_join(join, source_context, join_context)
            else:
                table = self.nested_loop_join(join, source_context, join_context)

            # Rebuild the context so every table name maps onto its own
            # column slice of the freshly joined rows.
            source_context = self.context(
                {
                    name: Table(table.columns, table.rows, column_range)
                    for name, column_range in column_ranges.items()
                }
            )
            condition = self.generate(join["condition"])
            if condition:
                source_context.filter(condition)

        condition = self.generate(step.condition)
        projections = self.generate_tuple(step.projections)

        # Nothing left to apply: hand the joined context straight back.
        if not condition and not projections:
            return source_context

        sink = self.table(step.projections if projections else source_context.columns)

        for reader, ctx in source_context:
            if condition and not ctx.eval(condition):
                continue

            if projections:
                sink.append(ctx.eval_tuple(projections))
            else:
                sink.append(reader.row)

            if len(sink) >= step.limit:
                break

        if projections:
            return self.context({step.name: sink})
        else:
            # No projections: keep the per-table column ranges over the
            # filtered rows.
            return self.context(
                {
                    name: Table(table.columns, sink.rows, table.column_range)
                    for name, table in source_context.tables.items()
                }
            )
+217
+218    def nested_loop_join(self, _join, source_context, join_context):
+219        table = Table(source_context.columns + join_context.columns)
+220
+221        for reader_a, _ in source_context:
+222            for reader_b, _ in join_context:
+223                table.append(reader_a.row + reader_b.row)
+224
+225        return table
+226
    def hash_join(self, join, source_context, join_context):
        """Equi-join: bucket both sides by their key tuples, then emit the
        cross product within each bucket, padding with a NULL row for the
        missing side of LEFT/RIGHT outer joins."""
        source_key = self.generate_tuple(join["source_key"])
        join_key = self.generate_tuple(join["join_key"])
        left = join.get("side") == "LEFT"
        right = join.get("side") == "RIGHT"

        # key tuple -> ([source-side rows], [join-side rows])
        results = collections.defaultdict(lambda: ([], []))

        for reader, ctx in source_context:
            results[ctx.eval_tuple(source_key)][0].append(reader.row)
        for reader, ctx in join_context:
            results[ctx.eval_tuple(join_key)][1].append(reader.row)

        table = Table(source_context.columns + join_context.columns)
        # All-NULL padding row sized to whichever side may be absent.
        nulls = [(None,) * len(join_context.columns if left else source_context.columns)]

        for a_group, b_group in results.values():
            if left:
                b_group = b_group or nulls
            elif right:
                a_group = a_group or nulls

            for a_row, b_row in itertools.product(a_group, b_group):
                table.append(a_row + b_row)

        return table
+253
    def aggregate(self, step, context):
        """Execute a planner.Aggregate step: sort rows by the group key,
        then evaluate aggregations once per run of equal keys."""
        group_by = self.generate_tuple(step.group.values())
        aggregations = self.generate_tuple(step.aggregations)
        operands = self.generate_tuple(step.operands)

        if operands:
            # Operands are per-row expressions the aggregations read;
            # evaluate them once and append them as extra columns.
            operand_table = Table(self.table(step.operands).columns)

            for reader, ctx in context:
                operand_table.append(ctx.eval_tuple(operands))

            for i, (a, b) in enumerate(zip(context.table.rows, operand_table.rows)):
                context.table.rows[i] = a + b

            width = len(context.columns)
            context.add_columns(*operand_table.columns)

            operand_table = Table(
                context.columns,
                context.table.rows,
                range(width, width + len(operand_table.columns)),
            )

            # The anonymous (None) table exposes the operand columns for
            # unqualified lookups during aggregation.
            context = self.context(
                {
                    None: operand_table,
                    **context.tables,
                }
            )

        # Grouping below relies on equal keys being adjacent.
        context.sort(group_by)

        group = None
        start = 0
        end = 1
        length = len(context.table)
        table = self.table(list(step.group) + step.aggregations)
        condition = self.generate(step.condition)

        def add_row():
            # Only emit groups that pass step.condition (HAVING-style filter).
            if not condition or context.eval(condition):
                table.append(group + context.eval_tuple(aggregations))

        if length:
            for i in range(length):
                context.set_index(i)
                key = context.eval_tuple(group_by)
                group = key if group is None else group
                end += 1
                if key != group:
                    # Key changed: flush the row range of the finished group.
                    context.set_range(start, end - 2)
                    add_row()
                    group = key
                    start = end - 2
                if len(table.rows) >= step.limit:
                    break
                if i == length - 1:
                    # Last row: flush the final (possibly only) group.
                    context.set_range(start, end - 1)
                    add_row()
        elif step.limit > 0 and not group_by:
            # Empty input without GROUP BY still yields one aggregate row.
            context.set_range(0, 0)
            table.append(context.eval_tuple(aggregations))

        context = self.context({step.name: table, **{name: table for name in context.tables}})

        # Post-aggregation projections are applied via a follow-up scan.
        if step.projections:
            return self.scan(step, context)
        return context
+322
    def sort(self, step, context):
        """Execute a planner.Sort step: evaluate projections, sort by the
        step key, apply the limit, and keep only the projected columns."""
        projections = self.generate_tuple(step.projections)
        projection_columns = [p.alias_or_name for p in step.projections]
        all_columns = list(context.columns) + projection_columns
        sink = self.table(all_columns)
        # Rows carry the original columns plus the projection values so the
        # sort key may reference either.
        for reader, ctx in context:
            sink.append(reader.row + ctx.eval_tuple(projections))

        sort_ctx = self.context(
            {
                None: sink,
                **{table: sink for table in context.tables},
            }
        )
        sort_ctx.sort(self.generate_tuple(step.key))

        # A missing LIMIT is represented as infinity.
        if not math.isinf(step.limit):
            sort_ctx.table.rows = sort_ctx.table.rows[0 : step.limit]

        # Strip the original columns, keeping only the projected tail.
        output = Table(
            projection_columns,
            rows=[r[len(context.columns) : len(all_columns)] for r in sort_ctx.table.rows],
        )
        return self.context({step.name: output})
+347
+348    def set_operation(self, step, context):
+349        left = context.tables[step.left]
+350        right = context.tables[step.right]
+351
+352        sink = self.table(left.columns)
+353
+354        if issubclass(step.op, exp.Intersect):
+355            sink.rows = list(set(left.rows).intersection(set(right.rows)))
+356        elif issubclass(step.op, exp.Except):
+357            sink.rows = list(set(left.rows).difference(set(right.rows)))
+358        elif issubclass(step.op, exp.Union) and step.distinct:
+359            sink.rows = list(set(left.rows).union(set(right.rows)))
+360        else:
+361            sink.rows = left.rows + right.rows
+362
+363        return self.context({step.name: sink})
+364
+365
def _ordered_py(self, expression):
    """Render an exp.Ordered node as a call to the ORDERED() helper."""
    this = self.sql(expression, "this")
    desc_flag = "True" if expression.args.get("desc") else "False"
    nulls_flag = "True" if expression.args.get("nulls_first") else "False"
    return f"ORDERED({this}, {desc_flag}, {nulls_flag})"
+371
+372
def _rename(self, e):
    """Generic fallback: render any expression as KEY(arg, ...)."""
    try:
        if "expressions" in e.args:
            head = self.sql(e, "this")
            args = f"{head}, {self.expressions(e)}" if head else self.expressions(e)
        else:
            args = self.format_args(*e.args.values())
        return f"{e.key.upper()}({args})"
    except Exception as ex:
        raise Exception(f"Could not rename {repr(e)}") from ex
+382
+383
def _case_sql(self, expression):
    """Compile a CASE expression into nested Python conditional expressions."""
    operand = self.sql(expression, "this")
    result = self.sql(expression, "default") or "None"

    # Build the conditional chain inside-out, so the first WHEN ends up
    # as the outermost test.
    for if_expr in reversed(expression.args["ifs"]):
        true_sql = self.sql(if_expr, "true")
        cond_sql = self.sql(if_expr, "this")
        if operand:
            # Simple CASE: compare the operand against each WHEN value.
            cond_sql = f"{operand} = ({cond_sql})"
        result = f"{true_sql} if {cond_sql} else ({result})"

    return result
+395
+396
def _lambda_sql(self, e: exp.Lambda) -> str:
    """Compile a SQL lambda into a Python lambda, rewriting references to
    the lambda's own parameters from Identifiers into plain Vars."""
    bound = {param.name.lower() for param in e.expressions}

    def _to_var(node):
        if isinstance(node, exp.Identifier) and node.name.lower() in bound:
            return exp.Var(this=node.name)
        return node

    e = e.transform(_to_var)

    return f"lambda {self.expressions(e, flat=True)}: {self.sql(e, 'this')}"
+407
+408
class Python(Dialect):
    """A "dialect" whose generator emits executable Python source instead
    of SQL; PythonExecutor compiles its output with eval-mode compile()."""

    class Tokenizer(tokens.Tokenizer):
        STRING_ESCAPES = ["\\"]

    class Generator(generator.Generator):
        TRANSFORMS = {
            # Binary operators and all known functions fall back to the
            # generic KEY(args, ...) rendering.
            **{klass: _rename for klass in subclasses(exp.__name__, exp.Binary)},
            **{klass: _rename for klass in exp.ALL_FUNCTIONS},
            exp.Case: _case_sql,
            exp.Alias: lambda self, e: self.sql(e.this),
            exp.Array: inline_array_sql,
            exp.And: lambda self, e: self.binary(e, "and"),
            exp.Between: _rename,
            exp.Boolean: lambda self, e: "True" if e.this else "False",
            exp.Cast: lambda self, e: f"CAST({self.sql(e.this)}, exp.DataType.Type.{e.args['to']})",
            # Column lookups index into the runtime `scope` mapping.
            exp.Column: lambda self, e: f"scope[{self.sql(e, 'table') or None}][{self.sql(e.this)}]",
            exp.Distinct: lambda self, e: f"set({self.sql(e, 'this')})",
            exp.Extract: lambda self, e: f"EXTRACT('{e.name.lower()}', {self.sql(e, 'expression')})",
            exp.In: lambda self, e: f"{self.sql(e, 'this')} in ({self.expressions(e, flat=True)})",
            exp.Is: lambda self, e: self.binary(e, "is"),
            exp.Lambda: _lambda_sql,
            exp.Not: lambda self, e: f"not {self.sql(e.this)}",
            exp.Null: lambda *_: "None",
            exp.Or: lambda self, e: self.binary(e, "or"),
            exp.Ordered: _ordered_py,
            # `SELECT *` has no column meaning here; it renders as a constant.
            exp.Star: lambda *_: "1",
        }
+
+ + +
+
+ +
+ + class + PythonExecutor: + + + +
+ +
 16class PythonExecutor:
    def __init__(self, env=None, tables=None):
        """
        Args:
            env: extra names merged over the base execution ENV.
            tables: mapping of table name -> Table available to scans.
        """
        # identify=True quotes identifiers; comments are irrelevant in
        # generated Python source.
        self.generator = Python().generator(identify=True, comments=False)
        self.env = {**ENV, **(env or {})}
        self.tables = tables or {}
+ 21
    def execute(self, plan):
        """Run every step of *plan* in dependency order and return the root
        step's output Table. Wraps any failure in ExecuteError."""
        running = set()
        finished = set()
        # Start with the leaves: steps that have no dependencies.
        queue = set(plan.leaves)
        contexts = {}

        while queue:
            node = queue.pop()
            try:
                # Merge the output tables of all dependencies into one context.
                context = self.context(
                    {
                        name: table
                        for dep in node.dependencies
                        for name, table in contexts[dep].tables.items()
                    }
                )
                running.add(node)

                if isinstance(node, planner.Scan):
                    contexts[node] = self.scan(node, context)
                elif isinstance(node, planner.Aggregate):
                    contexts[node] = self.aggregate(node, context)
                elif isinstance(node, planner.Join):
                    contexts[node] = self.join(node, context)
                elif isinstance(node, planner.Sort):
                    contexts[node] = self.sort(node, context)
                elif isinstance(node, planner.SetOperation):
                    contexts[node] = self.set_operation(node, context)
                else:
                    raise NotImplementedError

                running.remove(node)
                finished.add(node)

                # Schedule dependents whose inputs are now all available.
                for dep in node.dependents:
                    if dep not in running and all(d in contexts for d in dep.dependencies):
                        queue.add(dep)

                # Drop contexts no longer needed by any unfinished dependent,
                # freeing their tables early.
                for dep in node.dependencies:
                    if all(d in finished for d in dep.dependents):
                        contexts.pop(dep)
            except Exception as e:
                raise ExecuteError(f"Step '{node.id}' failed: {e}") from e

        root = plan.root
        return contexts[root].tables[root.name]
+ 68
+ 69    def generate(self, expression):
+ 70        """Convert a SQL expression into literal Python code and compile it into bytecode."""
+ 71        if not expression:
+ 72            return None
+ 73
+ 74        sql = self.generator.generate(expression)
+ 75        return compile(sql, sql, "eval", optimize=2)
+ 76
+ 77    def generate_tuple(self, expressions):
+ 78        """Convert an array of SQL expressions into tuple of Python byte code."""
+ 79        if not expressions:
+ 80            return tuple()
+ 81        return tuple(self.generate(expression) for expression in expressions)
+ 82
    def context(self, tables):
        # Build an evaluation Context over *tables*, sharing this
        # executor's environment.
        return Context(tables, env=self.env)
+ 85
+ 86    def table(self, expressions):
+ 87        return Table(
+ 88            expression.alias_or_name if isinstance(expression, exp.Expression) else expression
+ 89            for expression in expressions
+ 90        )
+ 91
    def scan(self, step, context):
        """Execute a planner.Scan: stream rows from a named source, a static
        row, or a CSV file, applying condition, projections, and limit."""
        source = step.source

        if source and isinstance(source, exp.Expression):
            source = source.name or source.alias

        condition = self.generate(step.condition)
        projections = self.generate_tuple(step.projections)

        if source is None:
            # No source at all: iterate a single empty row.
            context, table_iter = self.static()
        elif source in context:
            # Re-scanning an intermediate result already in the context.
            if not projections and not condition:
                return self.context({step.name: context.tables[source]})
            table_iter = context.table_iter(source)
        elif isinstance(step.source, exp.Table) and isinstance(step.source.this, exp.ReadCSV):
            table_iter = self.scan_csv(step)
            # scan_csv yields its Context first, then one reader per row.
            context = next(table_iter)
        else:
            context, table_iter = self.scan_table(step)

        if projections:
            sink = self.table(step.projections)
        else:
            sink = self.table(context.columns)

        for reader in table_iter:
            if len(sink) >= step.limit:
                break

            if condition and not context.eval(condition):
                continue

            if projections:
                sink.append(context.eval_tuple(projections))
            else:
                sink.append(reader.row)

        return self.context({step.name: sink})
+131
    def static(self):
        # Sourceless scan: an empty context plus a single empty row reader,
        # used by scan() when the step has no source.
        return self.context({}), [RowReader(())]
+134
    def scan_table(self, step):
        # Resolve the physical table registered with this executor.
        table = self.tables.find(step.source)
        context = self.context({step.source.alias_or_name: table})
        return context, iter(table)
+139
    def scan_csv(self, step):
        """Generator over a CSV source: first yields the Context, then the
        table's shared reader once per data row (protocol consumed by scan())."""
        alias = step.source.alias
        source = step.source.this

        with csv_reader(source) as reader:
            # The first CSV record is the header row.
            columns = next(reader)
            table = Table(columns)
            context = self.context({alias: table})
            # Hand the context to the caller before streaming rows.
            yield context
            types = []

            for row in reader:
                if not types:
                    # Infer column types from the first data row; values that
                    # don't parse as Python literals stay strings.
                    for v in row:
                        try:
                            types.append(type(ast.literal_eval(v)))
                        except (ValueError, SyntaxError):
                            types.append(str)
                context.set_row(tuple(t(v) for t, v in zip(types, row)))
                yield context.table.reader
+160
    def join(self, step, context):
        """Execute a planner.Join step: fold each joined table into the
        source context, then apply the step's condition/projections/limit."""
        source = step.name

        source_table = context.tables[source]
        source_context = self.context({source: source_table})
        # Track which slice of the combined row belongs to each table name.
        column_ranges = {source: range(0, len(source_table.columns))}

        for name, join in step.joins.items():
            table = context.tables[name]
            start = max(r.stop for r in column_ranges.values())
            column_ranges[name] = range(start, len(table.columns) + start)
            join_context = self.context({name: table})

            # A "source_key" marks an equi-join, which can be hashed;
            # otherwise fall back to the O(n*m) nested loop.
            if join.get("source_key"):
                table = self.hash_join(join, source_context, join_context)
            else:
                table = self.nested_loop_join(join, source_context, join_context)

            # Rebuild the context so every table name maps onto its own
            # column slice of the freshly joined rows.
            source_context = self.context(
                {
                    name: Table(table.columns, table.rows, column_range)
                    for name, column_range in column_ranges.items()
                }
            )
            condition = self.generate(join["condition"])
            if condition:
                source_context.filter(condition)

        condition = self.generate(step.condition)
        projections = self.generate_tuple(step.projections)

        # Nothing left to apply: hand the joined context straight back.
        if not condition and not projections:
            return source_context

        sink = self.table(step.projections if projections else source_context.columns)

        for reader, ctx in source_context:
            if condition and not ctx.eval(condition):
                continue

            if projections:
                sink.append(ctx.eval_tuple(projections))
            else:
                sink.append(reader.row)

            if len(sink) >= step.limit:
                break

        if projections:
            return self.context({step.name: sink})
        else:
            # No projections: keep the per-table column ranges over the
            # filtered rows.
            return self.context(
                {
                    name: Table(table.columns, sink.rows, table.column_range)
                    for name, table in source_context.tables.items()
                }
            )
+218
+219    def nested_loop_join(self, _join, source_context, join_context):
+220        table = Table(source_context.columns + join_context.columns)
+221
+222        for reader_a, _ in source_context:
+223            for reader_b, _ in join_context:
+224                table.append(reader_a.row + reader_b.row)
+225
+226        return table
+227
    def hash_join(self, join, source_context, join_context):
        """Equi-join: bucket both sides by their key tuples, then emit the
        cross product within each bucket, padding with a NULL row for the
        missing side of LEFT/RIGHT outer joins."""
        source_key = self.generate_tuple(join["source_key"])
        join_key = self.generate_tuple(join["join_key"])
        left = join.get("side") == "LEFT"
        right = join.get("side") == "RIGHT"

        # key tuple -> ([source-side rows], [join-side rows])
        results = collections.defaultdict(lambda: ([], []))

        for reader, ctx in source_context:
            results[ctx.eval_tuple(source_key)][0].append(reader.row)
        for reader, ctx in join_context:
            results[ctx.eval_tuple(join_key)][1].append(reader.row)

        table = Table(source_context.columns + join_context.columns)
        # All-NULL padding row sized to whichever side may be absent.
        nulls = [(None,) * len(join_context.columns if left else source_context.columns)]

        for a_group, b_group in results.values():
            if left:
                b_group = b_group or nulls
            elif right:
                a_group = a_group or nulls

            for a_row, b_row in itertools.product(a_group, b_group):
                table.append(a_row + b_row)

        return table
+254
    def aggregate(self, step, context):
        """Execute a planner.Aggregate step: sort rows by the group key,
        then evaluate aggregations once per run of equal keys."""
        group_by = self.generate_tuple(step.group.values())
        aggregations = self.generate_tuple(step.aggregations)
        operands = self.generate_tuple(step.operands)

        if operands:
            # Operands are per-row expressions the aggregations read;
            # evaluate them once and append them as extra columns.
            operand_table = Table(self.table(step.operands).columns)

            for reader, ctx in context:
                operand_table.append(ctx.eval_tuple(operands))

            for i, (a, b) in enumerate(zip(context.table.rows, operand_table.rows)):
                context.table.rows[i] = a + b

            width = len(context.columns)
            context.add_columns(*operand_table.columns)

            operand_table = Table(
                context.columns,
                context.table.rows,
                range(width, width + len(operand_table.columns)),
            )

            # The anonymous (None) table exposes the operand columns for
            # unqualified lookups during aggregation.
            context = self.context(
                {
                    None: operand_table,
                    **context.tables,
                }
            )

        # Grouping below relies on equal keys being adjacent.
        context.sort(group_by)

        group = None
        start = 0
        end = 1
        length = len(context.table)
        table = self.table(list(step.group) + step.aggregations)
        condition = self.generate(step.condition)

        def add_row():
            # Only emit groups that pass step.condition (HAVING-style filter).
            if not condition or context.eval(condition):
                table.append(group + context.eval_tuple(aggregations))

        if length:
            for i in range(length):
                context.set_index(i)
                key = context.eval_tuple(group_by)
                group = key if group is None else group
                end += 1
                if key != group:
                    # Key changed: flush the row range of the finished group.
                    context.set_range(start, end - 2)
                    add_row()
                    group = key
                    start = end - 2
                if len(table.rows) >= step.limit:
                    break
                if i == length - 1:
                    # Last row: flush the final (possibly only) group.
                    context.set_range(start, end - 1)
                    add_row()
        elif step.limit > 0 and not group_by:
            # Empty input without GROUP BY still yields one aggregate row.
            context.set_range(0, 0)
            table.append(context.eval_tuple(aggregations))

        context = self.context({step.name: table, **{name: table for name in context.tables}})

        # Post-aggregation projections are applied via a follow-up scan.
        if step.projections:
            return self.scan(step, context)
        return context
+323
    def sort(self, step, context):
        """Execute a planner.Sort step: evaluate projections, sort by the
        step key, apply the limit, and keep only the projected columns."""
        projections = self.generate_tuple(step.projections)
        projection_columns = [p.alias_or_name for p in step.projections]
        all_columns = list(context.columns) + projection_columns
        sink = self.table(all_columns)
        # Rows carry the original columns plus the projection values so the
        # sort key may reference either.
        for reader, ctx in context:
            sink.append(reader.row + ctx.eval_tuple(projections))

        sort_ctx = self.context(
            {
                None: sink,
                **{table: sink for table in context.tables},
            }
        )
        sort_ctx.sort(self.generate_tuple(step.key))

        # A missing LIMIT is represented as infinity.
        if not math.isinf(step.limit):
            sort_ctx.table.rows = sort_ctx.table.rows[0 : step.limit]

        # Strip the original columns, keeping only the projected tail.
        output = Table(
            projection_columns,
            rows=[r[len(context.columns) : len(all_columns)] for r in sort_ctx.table.rows],
        )
        return self.context({step.name: output})
+348
+349    def set_operation(self, step, context):
+350        left = context.tables[step.left]
+351        right = context.tables[step.right]
+352
+353        sink = self.table(left.columns)
+354
+355        if issubclass(step.op, exp.Intersect):
+356            sink.rows = list(set(left.rows).intersection(set(right.rows)))
+357        elif issubclass(step.op, exp.Except):
+358            sink.rows = list(set(left.rows).difference(set(right.rows)))
+359        elif issubclass(step.op, exp.Union) and step.distinct:
+360            sink.rows = list(set(left.rows).union(set(right.rows)))
+361        else:
+362            sink.rows = left.rows + right.rows
+363
+364        return self.context({step.name: sink})
+
+ + + + +
+ +
+ + PythonExecutor(env=None, tables=None) + + + +
+ +
17    def __init__(self, env=None, tables=None):
+18        self.generator = Python().generator(identify=True, comments=False)
+19        self.env = {**ENV, **(env or {})}
+20        self.tables = tables or {}
+
+ + + + +
+
+ +
+ + def + execute(self, plan): + + + +
+ +
22    def execute(self, plan):
+23        running = set()
+24        finished = set()
+25        queue = set(plan.leaves)
+26        contexts = {}
+27
+28        while queue:
+29            node = queue.pop()
+30            try:
+31                context = self.context(
+32                    {
+33                        name: table
+34                        for dep in node.dependencies
+35                        for name, table in contexts[dep].tables.items()
+36                    }
+37                )
+38                running.add(node)
+39
+40                if isinstance(node, planner.Scan):
+41                    contexts[node] = self.scan(node, context)
+42                elif isinstance(node, planner.Aggregate):
+43                    contexts[node] = self.aggregate(node, context)
+44                elif isinstance(node, planner.Join):
+45                    contexts[node] = self.join(node, context)
+46                elif isinstance(node, planner.Sort):
+47                    contexts[node] = self.sort(node, context)
+48                elif isinstance(node, planner.SetOperation):
+49                    contexts[node] = self.set_operation(node, context)
+50                else:
+51                    raise NotImplementedError
+52
+53                running.remove(node)
+54                finished.add(node)
+55
+56                for dep in node.dependents:
+57                    if dep not in running and all(d in contexts for d in dep.dependencies):
+58                        queue.add(dep)
+59
+60                for dep in node.dependencies:
+61                    if all(d in finished for d in dep.dependents):
+62                        contexts.pop(dep)
+63            except Exception as e:
+64                raise ExecuteError(f"Step '{node.id}' failed: {e}") from e
+65
+66        root = plan.root
+67        return contexts[root].tables[root.name]
+
+ + + + +
+
+ +
+ + def + generate(self, expression): + + + +
+ +
69    def generate(self, expression):
+70        """Convert a SQL expression into literal Python code and compile it into bytecode."""
+71        if not expression:
+72            return None
+73
+74        sql = self.generator.generate(expression)
+75        return compile(sql, sql, "eval", optimize=2)
+
+ + +

Convert a SQL expression into literal Python code and compile it into bytecode.

+
+ + +
+
+ +
+ + def + generate_tuple(self, expressions): + + + +
+ +
77    def generate_tuple(self, expressions):
+78        """Convert an array of SQL expressions into tuple of Python byte code."""
+79        if not expressions:
+80            return tuple()
+81        return tuple(self.generate(expression) for expression in expressions)
+
+ + +

Convert an array of SQL expressions into tuple of Python byte code.

+
+ + +
+
+ +
+ + def + context(self, tables): + + + +
+ +
83    def context(self, tables):
+84        return Context(tables, env=self.env)
+
+ + + + +
+
+ +
+ + def + table(self, expressions): + + + +
+ +
86    def table(self, expressions):
+87        return Table(
+88            expression.alias_or_name if isinstance(expression, exp.Expression) else expression
+89            for expression in expressions
+90        )
+
+ + + + +
+
+ +
+ + def + scan(self, step, context): + + + +
+ +
 92    def scan(self, step, context):
+ 93        source = step.source
+ 94
+ 95        if source and isinstance(source, exp.Expression):
+ 96            source = source.name or source.alias
+ 97
+ 98        condition = self.generate(step.condition)
+ 99        projections = self.generate_tuple(step.projections)
+100
+101        if source is None:
+102            context, table_iter = self.static()
+103        elif source in context:
+104            if not projections and not condition:
+105                return self.context({step.name: context.tables[source]})
+106            table_iter = context.table_iter(source)
+107        elif isinstance(step.source, exp.Table) and isinstance(step.source.this, exp.ReadCSV):
+108            table_iter = self.scan_csv(step)
+109            context = next(table_iter)
+110        else:
+111            context, table_iter = self.scan_table(step)
+112
+113        if projections:
+114            sink = self.table(step.projections)
+115        else:
+116            sink = self.table(context.columns)
+117
+118        for reader in table_iter:
+119            if len(sink) >= step.limit:
+120                break
+121
+122            if condition and not context.eval(condition):
+123                continue
+124
+125            if projections:
+126                sink.append(context.eval_tuple(projections))
+127            else:
+128                sink.append(reader.row)
+129
+130        return self.context({step.name: sink})
+
+ + + + +
+
+ +
+ + def + static(self): + + + +
+ +
132    def static(self):
+133        return self.context({}), [RowReader(())]
+
+ + + + +
+
+ +
+ + def + scan_table(self, step): + + + +
+ +
135    def scan_table(self, step):
+136        table = self.tables.find(step.source)
+137        context = self.context({step.source.alias_or_name: table})
+138        return context, iter(table)
+
+ + + + +
+
+ +
+ + def + scan_csv(self, step): + + + +
+ +
140    def scan_csv(self, step):
+141        alias = step.source.alias
+142        source = step.source.this
+143
+144        with csv_reader(source) as reader:
+145            columns = next(reader)
+146            table = Table(columns)
+147            context = self.context({alias: table})
+148            yield context
+149            types = []
+150
+151            for row in reader:
+152                if not types:
+153                    for v in row:
+154                        try:
+155                            types.append(type(ast.literal_eval(v)))
+156                        except (ValueError, SyntaxError):
+157                            types.append(str)
+158                context.set_row(tuple(t(v) for t, v in zip(types, row)))
+159                yield context.table.reader
+
+ + + + +
+
+ +
+ + def + join(self, step, context): + + + +
+ +
161    def join(self, step, context):
+162        source = step.name
+163
+164        source_table = context.tables[source]
+165        source_context = self.context({source: source_table})
+166        column_ranges = {source: range(0, len(source_table.columns))}
+167
+168        for name, join in step.joins.items():
+169            table = context.tables[name]
+170            start = max(r.stop for r in column_ranges.values())
+171            column_ranges[name] = range(start, len(table.columns) + start)
+172            join_context = self.context({name: table})
+173
+174            if join.get("source_key"):
+175                table = self.hash_join(join, source_context, join_context)
+176            else:
+177                table = self.nested_loop_join(join, source_context, join_context)
+178
+179            source_context = self.context(
+180                {
+181                    name: Table(table.columns, table.rows, column_range)
+182                    for name, column_range in column_ranges.items()
+183                }
+184            )
+185            condition = self.generate(join["condition"])
+186            if condition:
+187                source_context.filter(condition)
+188
+189        condition = self.generate(step.condition)
+190        projections = self.generate_tuple(step.projections)
+191
+192        if not condition and not projections:
+193            return source_context
+194
+195        sink = self.table(step.projections if projections else source_context.columns)
+196
+197        for reader, ctx in source_context:
+198            if condition and not ctx.eval(condition):
+199                continue
+200
+201            if projections:
+202                sink.append(ctx.eval_tuple(projections))
+203            else:
+204                sink.append(reader.row)
+205
+206            if len(sink) >= step.limit:
+207                break
+208
+209        if projections:
+210            return self.context({step.name: sink})
+211        else:
+212            return self.context(
+213                {
+214                    name: Table(table.columns, sink.rows, table.column_range)
+215                    for name, table in source_context.tables.items()
+216                }
+217            )
+
+ + + + +
+
+ +
+ + def + nested_loop_join(self, _join, source_context, join_context): + + + +
+ +
219    def nested_loop_join(self, _join, source_context, join_context):
+220        table = Table(source_context.columns + join_context.columns)
+221
+222        for reader_a, _ in source_context:
+223            for reader_b, _ in join_context:
+224                table.append(reader_a.row + reader_b.row)
+225
+226        return table
+
+ + + + +
+
+ +
+ + def + hash_join(self, join, source_context, join_context): + + + +
+ +
228    def hash_join(self, join, source_context, join_context):
+229        source_key = self.generate_tuple(join["source_key"])
+230        join_key = self.generate_tuple(join["join_key"])
+231        left = join.get("side") == "LEFT"
+232        right = join.get("side") == "RIGHT"
+233
+234        results = collections.defaultdict(lambda: ([], []))
+235
+236        for reader, ctx in source_context:
+237            results[ctx.eval_tuple(source_key)][0].append(reader.row)
+238        for reader, ctx in join_context:
+239            results[ctx.eval_tuple(join_key)][1].append(reader.row)
+240
+241        table = Table(source_context.columns + join_context.columns)
+242        nulls = [(None,) * len(join_context.columns if left else source_context.columns)]
+243
+244        for a_group, b_group in results.values():
+245            if left:
+246                b_group = b_group or nulls
+247            elif right:
+248                a_group = a_group or nulls
+249
+250            for a_row, b_row in itertools.product(a_group, b_group):
+251                table.append(a_row + b_row)
+252
+253        return table
+
+ + + + +
+
+ +
+ + def + aggregate(self, step, context): + + + +
+ +
255    def aggregate(self, step, context):
+256        group_by = self.generate_tuple(step.group.values())
+257        aggregations = self.generate_tuple(step.aggregations)
+258        operands = self.generate_tuple(step.operands)
+259
+260        if operands:
+261            operand_table = Table(self.table(step.operands).columns)
+262
+263            for reader, ctx in context:
+264                operand_table.append(ctx.eval_tuple(operands))
+265
+266            for i, (a, b) in enumerate(zip(context.table.rows, operand_table.rows)):
+267                context.table.rows[i] = a + b
+268
+269            width = len(context.columns)
+270            context.add_columns(*operand_table.columns)
+271
+272            operand_table = Table(
+273                context.columns,
+274                context.table.rows,
+275                range(width, width + len(operand_table.columns)),
+276            )
+277
+278            context = self.context(
+279                {
+280                    None: operand_table,
+281                    **context.tables,
+282                }
+283            )
+284
+285        context.sort(group_by)
+286
+287        group = None
+288        start = 0
+289        end = 1
+290        length = len(context.table)
+291        table = self.table(list(step.group) + step.aggregations)
+292        condition = self.generate(step.condition)
+293
+294        def add_row():
+295            if not condition or context.eval(condition):
+296                table.append(group + context.eval_tuple(aggregations))
+297
+298        if length:
+299            for i in range(length):
+300                context.set_index(i)
+301                key = context.eval_tuple(group_by)
+302                group = key if group is None else group
+303                end += 1
+304                if key != group:
+305                    context.set_range(start, end - 2)
+306                    add_row()
+307                    group = key
+308                    start = end - 2
+309                if len(table.rows) >= step.limit:
+310                    break
+311                if i == length - 1:
+312                    context.set_range(start, end - 1)
+313                    add_row()
+314        elif step.limit > 0 and not group_by:
+315            context.set_range(0, 0)
+316            table.append(context.eval_tuple(aggregations))
+317
+318        context = self.context({step.name: table, **{name: table for name in context.tables}})
+319
+320        if step.projections:
+321            return self.scan(step, context)
+322        return context
+
+ + + + +
+
+ +
+ + def + sort(self, step, context): + + + +
+ +
324    def sort(self, step, context):
+325        projections = self.generate_tuple(step.projections)
+326        projection_columns = [p.alias_or_name for p in step.projections]
+327        all_columns = list(context.columns) + projection_columns
+328        sink = self.table(all_columns)
+329        for reader, ctx in context:
+330            sink.append(reader.row + ctx.eval_tuple(projections))
+331
+332        sort_ctx = self.context(
+333            {
+334                None: sink,
+335                **{table: sink for table in context.tables},
+336            }
+337        )
+338        sort_ctx.sort(self.generate_tuple(step.key))
+339
+340        if not math.isinf(step.limit):
+341            sort_ctx.table.rows = sort_ctx.table.rows[0 : step.limit]
+342
+343        output = Table(
+344            projection_columns,
+345            rows=[r[len(context.columns) : len(all_columns)] for r in sort_ctx.table.rows],
+346        )
+347        return self.context({step.name: output})
+
+ + + + +
+
+ +
+ + def + set_operation(self, step, context): + + + +
+ +
349    def set_operation(self, step, context):
+350        left = context.tables[step.left]
+351        right = context.tables[step.right]
+352
+353        sink = self.table(left.columns)
+354
+355        if issubclass(step.op, exp.Intersect):
+356            sink.rows = list(set(left.rows).intersection(set(right.rows)))
+357        elif issubclass(step.op, exp.Except):
+358            sink.rows = list(set(left.rows).difference(set(right.rows)))
+359        elif issubclass(step.op, exp.Union) and step.distinct:
+360            sink.rows = list(set(left.rows).union(set(right.rows)))
+361        else:
+362            sink.rows = left.rows + right.rows
+363
+364        return self.context({step.name: sink})
+
+ + + + +
+
+
+ +
+ + class + Python(sqlglot.dialects.dialect.Dialect): + + + +
+ +
410class Python(Dialect):
+411    class Tokenizer(tokens.Tokenizer):
+412        STRING_ESCAPES = ["\\"]
+413
+414    class Generator(generator.Generator):
+415        TRANSFORMS = {
+416            **{klass: _rename for klass in subclasses(exp.__name__, exp.Binary)},
+417            **{klass: _rename for klass in exp.ALL_FUNCTIONS},
+418            exp.Case: _case_sql,
+419            exp.Alias: lambda self, e: self.sql(e.this),
+420            exp.Array: inline_array_sql,
+421            exp.And: lambda self, e: self.binary(e, "and"),
+422            exp.Between: _rename,
+423            exp.Boolean: lambda self, e: "True" if e.this else "False",
+424            exp.Cast: lambda self, e: f"CAST({self.sql(e.this)}, exp.DataType.Type.{e.args['to']})",
+425            exp.Column: lambda self, e: f"scope[{self.sql(e, 'table') or None}][{self.sql(e.this)}]",
+426            exp.Distinct: lambda self, e: f"set({self.sql(e, 'this')})",
+427            exp.Extract: lambda self, e: f"EXTRACT('{e.name.lower()}', {self.sql(e, 'expression')})",
+428            exp.In: lambda self, e: f"{self.sql(e, 'this')} in ({self.expressions(e, flat=True)})",
+429            exp.Is: lambda self, e: self.binary(e, "is"),
+430            exp.Lambda: _lambda_sql,
+431            exp.Not: lambda self, e: f"not {self.sql(e.this)}",
+432            exp.Null: lambda *_: "None",
+433            exp.Or: lambda self, e: self.binary(e, "or"),
+434            exp.Ordered: _ordered_py,
+435            exp.Star: lambda *_: "1",
+436        }
+
+ + + + +
+
+ + Python() + + +
+ + + + +
+ +
+
+ +
+ + class + Python.Tokenizer(sqlglot.tokens.Tokenizer): + + + +
+ +
411    class Tokenizer(tokens.Tokenizer):
+412        STRING_ESCAPES = ["\\"]
+
+ + + + +
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Python.Generator(sqlglot.generator.Generator): + + + +
+ +
414    class Generator(generator.Generator):
+415        TRANSFORMS = {
+416            **{klass: _rename for klass in subclasses(exp.__name__, exp.Binary)},
+417            **{klass: _rename for klass in exp.ALL_FUNCTIONS},
+418            exp.Case: _case_sql,
+419            exp.Alias: lambda self, e: self.sql(e.this),
+420            exp.Array: inline_array_sql,
+421            exp.And: lambda self, e: self.binary(e, "and"),
+422            exp.Between: _rename,
+423            exp.Boolean: lambda self, e: "True" if e.this else "False",
+424            exp.Cast: lambda self, e: f"CAST({self.sql(e.this)}, exp.DataType.Type.{e.args['to']})",
+425            exp.Column: lambda self, e: f"scope[{self.sql(e, 'table') or None}][{self.sql(e.this)}]",
+426            exp.Distinct: lambda self, e: f"set({self.sql(e, 'this')})",
+427            exp.Extract: lambda self, e: f"EXTRACT('{e.name.lower()}', {self.sql(e, 'expression')})",
+428            exp.In: lambda self, e: f"{self.sql(e, 'this')} in ({self.expressions(e, flat=True)})",
+429            exp.Is: lambda self, e: self.binary(e, "is"),
+430            exp.Lambda: _lambda_sql,
+431            exp.Not: lambda self, e: f"not {self.sql(e.this)}",
+432            exp.Null: lambda *_: "None",
+433            exp.Or: lambda self, e: self.binary(e, "or"),
+434            exp.Ordered: _ordered_py,
+435            exp.Star: lambda *_: "1",
+436        }
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+
Inherited Members
+
+
sqlglot.generator.Generator
+
Generator
+
generate
+
unsupported
+
sep
+
seg
+
pad_comment
+
maybe_comment
+
wrap
+
no_identify
+
normalize_func
+
indent
+
sql
+
uncache_sql
+
cache_sql
+
characterset_sql
+
column_sql
+
columndef_sql
+
columnconstraint_sql
+
autoincrementcolumnconstraint_sql
+
checkcolumnconstraint_sql
+
commentcolumnconstraint_sql
+
collatecolumnconstraint_sql
+
encodecolumnconstraint_sql
+
defaultcolumnconstraint_sql
+
generatedasidentitycolumnconstraint_sql
+
notnullcolumnconstraint_sql
+
primarykeycolumnconstraint_sql
+
uniquecolumnconstraint_sql
+
create_sql
+
describe_sql
+
prepend_ctes
+
with_sql
+
cte_sql
+
tablealias_sql
+
bitstring_sql
+
hexstring_sql
+
datatype_sql
+
directory_sql
+
delete_sql
+
drop_sql
+
except_sql
+
except_op
+
fetch_sql
+
filter_sql
+
hint_sql
+
index_sql
+
identifier_sql
+
national_sql
+
partition_sql
+
properties_sql
+
root_properties
+
properties
+
with_properties
+
locate_properties
+
property_sql
+
likeproperty_sql
+
fallbackproperty_sql
+
journalproperty_sql
+
freespaceproperty_sql
+
afterjournalproperty_sql
+
checksumproperty_sql
+
mergeblockratioproperty_sql
+
datablocksizeproperty_sql
+
blockcompressionproperty_sql
+
isolatedloadingproperty_sql
+
insert_sql
+
intersect_sql
+
intersect_op
+
introducer_sql
+
pseudotype_sql
+
rowformatdelimitedproperty_sql
+
table_sql
+
tablesample_sql
+
pivot_sql
+
tuple_sql
+
update_sql
+
values_sql
+
var_sql
+
into_sql
+
from_sql
+
group_sql
+
having_sql
+
join_sql
+
lambda_sql
+
lateral_sql
+
limit_sql
+
offset_sql
+
lock_sql
+
literal_sql
+
loaddata_sql
+
null_sql
+
boolean_sql
+
order_sql
+
cluster_sql
+
distribute_sql
+
sort_sql
+
ordered_sql
+
matchrecognize_sql
+
query_modifiers
+
select_sql
+
schema_sql
+
star_sql
+
structkwarg_sql
+
parameter_sql
+
sessionparameter_sql
+
placeholder_sql
+
subquery_sql
+
qualify_sql
+
union_sql
+
union_op
+
unnest_sql
+
where_sql
+
window_sql
+
partition_by_sql
+
window_spec_sql
+
withingroup_sql
+
between_sql
+
bracket_sql
+
all_sql
+
any_sql
+
exists_sql
+
case_sql
+
constraint_sql
+
extract_sql
+
trim_sql
+
concat_sql
+
check_sql
+
foreignkey_sql
+
primarykey_sql
+
unique_sql
+
if_sql
+
in_sql
+
in_unnest_op
+
interval_sql
+
return_sql
+
reference_sql
+
anonymous_sql
+
paren_sql
+
neg_sql
+
not_sql
+
alias_sql
+
aliases_sql
+
attimezone_sql
+
add_sql
+
and_sql
+
connector_sql
+
bitwiseand_sql
+
bitwiseleftshift_sql
+
bitwisenot_sql
+
bitwiseor_sql
+
bitwiserightshift_sql
+
bitwisexor_sql
+
cast_sql
+
currentdate_sql
+
collate_sql
+
command_sql
+
transaction_sql
+
commit_sql
+
rollback_sql
+
altercolumn_sql
+
renametable_sql
+
altertable_sql
+
droppartition_sql
+
addconstraint_sql
+
distinct_sql
+
ignorenulls_sql
+
respectnulls_sql
+
intdiv_sql
+
dpipe_sql
+
div_sql
+
distance_sql
+
dot_sql
+
eq_sql
+
escape_sql
+
glob_sql
+
gt_sql
+
gte_sql
+
ilike_sql
+
is_sql
+
like_sql
+
similarto_sql
+
lt_sql
+
lte_sql
+
mod_sql
+
mul_sql
+
neq_sql
+
nullsafeeq_sql
+
nullsafeneq_sql
+
or_sql
+
slice_sql
+
sub_sql
+
trycast_sql
+
use_sql
+
binary
+
function_fallback_sql
+
format_args
+
text_width
+
format_time
+
expressions
+
op_expressions
+
naked_property
+
set_operation
+
tag_sql
+
token_sql
+
userdefinedfunction_sql
+
userdefinedfunctionkwarg_sql
+
joinhint_sql
+
kwarg_sql
+
when_sql
+
merge_sql
+ +
+
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/executor/table.html b/docs/sqlglot/executor/table.html new file mode 100644 index 0000000..59f777b --- /dev/null +++ b/docs/sqlglot/executor/table.html @@ -0,0 +1,802 @@ + + + + + + + sqlglot.executor.table API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.executor.table

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4
+  5from sqlglot.helper import dict_depth
+  6from sqlglot.schema import AbstractMappingSchema
+  7
+  8
+  9class Table:
+ 10    def __init__(self, columns, rows=None, column_range=None):
+ 11        self.columns = tuple(columns)
+ 12        self.column_range = column_range
+ 13        self.reader = RowReader(self.columns, self.column_range)
+ 14        self.rows = rows or []
+ 15        if rows:
+ 16            assert len(rows[0]) == len(self.columns)
+ 17        self.range_reader = RangeReader(self)
+ 18
+ 19    def add_columns(self, *columns: str) -> None:
+ 20        self.columns += columns
+ 21        if self.column_range:
+ 22            self.column_range = range(
+ 23                self.column_range.start, self.column_range.stop + len(columns)
+ 24            )
+ 25        self.reader = RowReader(self.columns, self.column_range)
+ 26
+ 27    def append(self, row):
+ 28        assert len(row) == len(self.columns)
+ 29        self.rows.append(row)
+ 30
+ 31    def pop(self):
+ 32        self.rows.pop()
+ 33
+ 34    @property
+ 35    def width(self):
+ 36        return len(self.columns)
+ 37
+ 38    def __len__(self):
+ 39        return len(self.rows)
+ 40
+ 41    def __iter__(self):
+ 42        return TableIter(self)
+ 43
+ 44    def __getitem__(self, index):
+ 45        self.reader.row = self.rows[index]
+ 46        return self.reader
+ 47
+ 48    def __repr__(self):
+ 49        columns = tuple(
+ 50            column
+ 51            for i, column in enumerate(self.columns)
+ 52            if not self.column_range or i in self.column_range
+ 53        )
+ 54        widths = {column: len(column) for column in columns}
+ 55        lines = [" ".join(column for column in columns)]
+ 56
+ 57        for i, row in enumerate(self):
+ 58            if i > 10:
+ 59                break
+ 60
+ 61            lines.append(
+ 62                " ".join(
+ 63                    str(row[column]).rjust(widths[column])[0 : widths[column]] for column in columns
+ 64                )
+ 65            )
+ 66        return "\n".join(lines)
+ 67
+ 68
+ 69class TableIter:
+ 70    def __init__(self, table):
+ 71        self.table = table
+ 72        self.index = -1
+ 73
+ 74    def __iter__(self):
+ 75        return self
+ 76
+ 77    def __next__(self):
+ 78        self.index += 1
+ 79        if self.index < len(self.table):
+ 80            return self.table[self.index]
+ 81        raise StopIteration
+ 82
+ 83
+ 84class RangeReader:
+ 85    def __init__(self, table):
+ 86        self.table = table
+ 87        self.range = range(0)
+ 88
+ 89    def __len__(self):
+ 90        return len(self.range)
+ 91
+ 92    def __getitem__(self, column):
+ 93        return (self.table[i][column] for i in self.range)
+ 94
+ 95
+ 96class RowReader:
+ 97    def __init__(self, columns, column_range=None):
+ 98        self.columns = {
+ 99            column: i for i, column in enumerate(columns) if not column_range or i in column_range
+100        }
+101        self.row = None
+102
+103    def __getitem__(self, column):
+104        return self.row[self.columns[column]]
+105
+106
+107class Tables(AbstractMappingSchema[Table]):
+108    pass
+109
+110
+111def ensure_tables(d: t.Optional[t.Dict]) -> Tables:
+112    return Tables(_ensure_tables(d))
+113
+114
+115def _ensure_tables(d: t.Optional[t.Dict]) -> t.Dict:
+116    if not d:
+117        return {}
+118
+119    depth = dict_depth(d)
+120
+121    if depth > 1:
+122        return {k: _ensure_tables(v) for k, v in d.items()}
+123
+124    result = {}
+125    for name, table in d.items():
+126        if isinstance(table, Table):
+127            result[name] = table
+128        else:
+129            columns = tuple(table[0]) if table else ()
+130            rows = [tuple(row[c] for c in columns) for row in table]
+131            result[name] = Table(columns=columns, rows=rows)
+132
+133    return result
+
+ + +
+
+ +
+ + class + Table: + + + +
+ +
10class Table:
+11    def __init__(self, columns, rows=None, column_range=None):
+12        self.columns = tuple(columns)
+13        self.column_range = column_range
+14        self.reader = RowReader(self.columns, self.column_range)
+15        self.rows = rows or []
+16        if rows:
+17            assert len(rows[0]) == len(self.columns)
+18        self.range_reader = RangeReader(self)
+19
+20    def add_columns(self, *columns: str) -> None:
+21        self.columns += columns
+22        if self.column_range:
+23            self.column_range = range(
+24                self.column_range.start, self.column_range.stop + len(columns)
+25            )
+26        self.reader = RowReader(self.columns, self.column_range)
+27
+28    def append(self, row):
+29        assert len(row) == len(self.columns)
+30        self.rows.append(row)
+31
+32    def pop(self):
+33        self.rows.pop()
+34
+35    @property
+36    def width(self):
+37        return len(self.columns)
+38
+39    def __len__(self):
+40        return len(self.rows)
+41
+42    def __iter__(self):
+43        return TableIter(self)
+44
+45    def __getitem__(self, index):
+46        self.reader.row = self.rows[index]
+47        return self.reader
+48
+49    def __repr__(self):
+50        columns = tuple(
+51            column
+52            for i, column in enumerate(self.columns)
+53            if not self.column_range or i in self.column_range
+54        )
+55        widths = {column: len(column) for column in columns}
+56        lines = [" ".join(column for column in columns)]
+57
+58        for i, row in enumerate(self):
+59            if i > 10:
+60                break
+61
+62            lines.append(
+63                " ".join(
+64                    str(row[column]).rjust(widths[column])[0 : widths[column]] for column in columns
+65                )
+66            )
+67        return "\n".join(lines)
+
+ + + + +
+ +
+ + Table(columns, rows=None, column_range=None) + + + +
+ +
11    def __init__(self, columns, rows=None, column_range=None):
+12        self.columns = tuple(columns)
+13        self.column_range = column_range
+14        self.reader = RowReader(self.columns, self.column_range)
+15        self.rows = rows or []
+16        if rows:
+17            assert len(rows[0]) == len(self.columns)
+18        self.range_reader = RangeReader(self)
+
+ + + + +
+
+ +
+ + def + add_columns(self, *columns: str) -> None: + + + +
+ +
20    def add_columns(self, *columns: str) -> None:
+21        self.columns += columns
+22        if self.column_range:
+23            self.column_range = range(
+24                self.column_range.start, self.column_range.stop + len(columns)
+25            )
+26        self.reader = RowReader(self.columns, self.column_range)
+
+ + + + +
+
+ +
+ + def + append(self, row): + + + +
+ +
28    def append(self, row):
+29        assert len(row) == len(self.columns)
+30        self.rows.append(row)
+
+ + + + +
+
+ +
+ + def + pop(self): + + + +
+ +
32    def pop(self):
+33        self.rows.pop()
+
+ + + + +
+
+
+ +
+ + class + TableIter: + + + +
+ +
70class TableIter:
+71    def __init__(self, table):
+72        self.table = table
+73        self.index = -1
+74
+75    def __iter__(self):
+76        return self
+77
+78    def __next__(self):
+79        self.index += 1
+80        if self.index < len(self.table):
+81            return self.table[self.index]
+82        raise StopIteration
+
+ + + + +
+ +
+ + TableIter(table) + + + +
+ +
71    def __init__(self, table):
+72        self.table = table
+73        self.index = -1
+
+ + + + +
+
+
+ +
+ + class + RangeReader: + + + +
+ +
85class RangeReader:
+86    def __init__(self, table):
+87        self.table = table
+88        self.range = range(0)
+89
+90    def __len__(self):
+91        return len(self.range)
+92
+93    def __getitem__(self, column):
+94        return (self.table[i][column] for i in self.range)
+
+ + + + +
+ +
+ + RangeReader(table) + + + +
+ +
86    def __init__(self, table):
+87        self.table = table
+88        self.range = range(0)
+
+ + + + +
+
+
+ +
+ + class + RowReader: + + + +
+ +
 97class RowReader:
+ 98    def __init__(self, columns, column_range=None):
+ 99        self.columns = {
+100            column: i for i, column in enumerate(columns) if not column_range or i in column_range
+101        }
+102        self.row = None
+103
+104    def __getitem__(self, column):
+105        return self.row[self.columns[column]]
+
+ + + + +
+ +
+ + RowReader(columns, column_range=None) + + + +
+ +
 98    def __init__(self, columns, column_range=None):
+ 99        self.columns = {
+100            column: i for i, column in enumerate(columns) if not column_range or i in column_range
+101        }
+102        self.row = None
+
+ + + + +
+
+
+ + + +
108class Tables(AbstractMappingSchema[Table]):
+109    pass
+
+ + +

Abstract base class for generic types.

+ +

A generic type is typically declared by inheriting from +this class parameterized with one or more type variables. +For example, a generic mapping type might be defined as::

+ +

class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc.

+ +

This class can then be used as follows::

+ +

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT: + try: + return mapping[key] + except KeyError: + return default

+
+ + + +
+
+ +
+ + def + ensure_tables(d: Optional[Dict]) -> sqlglot.executor.table.Tables: + + + +
+ +
112def ensure_tables(d: t.Optional[t.Dict]) -> Tables:
+113    return Tables(_ensure_tables(d))
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/expressions.html b/docs/sqlglot/expressions.html new file mode 100644 index 0000000..997a895 --- /dev/null +++ b/docs/sqlglot/expressions.html @@ -0,0 +1,39484 @@ + + + + + + + sqlglot.expressions API documentation + + + + + + + + + +
+
+ Edit on GitHub + +

Expressions

+ +

Every AST node in SQLGlot is represented by a subclass of Expression.

+ +

This module contains the implementation of all supported Expression types. Additionally, +it exposes a number of helper functions, which are mainly used to programmatically build +SQL expressions, such as sqlglot.expressions.select.

+ +
+
+ + + + + +
   1"""
+   2## Expressions
+   3
+   4Every AST node in SQLGlot is represented by a subclass of `Expression`.
+   5
+   6This module contains the implementation of all supported `Expression` types. Additionally,
+   7it exposes a number of helper functions, which are mainly used to programmatically build
+   8SQL expressions, such as `sqlglot.expressions.select`.
+   9
+  10----
+  11"""
+  12
+  13from __future__ import annotations
+  14
+  15import datetime
+  16import math
+  17import numbers
+  18import re
+  19import typing as t
+  20from collections import deque
+  21from copy import deepcopy
+  22from enum import auto
+  23
+  24from sqlglot.errors import ParseError
+  25from sqlglot.helper import (
+  26    AutoName,
+  27    camel_to_snake_case,
+  28    ensure_collection,
+  29    seq_get,
+  30    split_num_words,
+  31    subclasses,
+  32)
+  33from sqlglot.tokens import Token
+  34
+  35if t.TYPE_CHECKING:
+  36    from sqlglot.dialects.dialect import DialectType
+  37
+  38
+  39class _Expression(type):
+  40    def __new__(cls, clsname, bases, attrs):
+  41        klass = super().__new__(cls, clsname, bases, attrs)
+  42
+  43        # When an Expression class is created, its key is automatically set to be
+  44        # the lowercase version of the class' name.
+  45        klass.key = clsname.lower()
+  46
+  47        # This is so that docstrings are not inherited in pdoc
+  48        klass.__doc__ = klass.__doc__ or ""
+  49
+  50        return klass
+  51
+  52
+  53class Expression(metaclass=_Expression):
+  54    """
+  55    The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
+  56    context, such as its child expressions, their names (arg keys), and whether a given child expression
+  57    is optional or not.
+  58
+  59    Attributes:
+  60        key: a unique key for each class in the Expression hierarchy. This is useful for hashing
+  61            and representing expressions as strings.
+  62        arg_types: determines what arguments (child nodes) are supported by an expression. It
+  63            maps arg keys to booleans that indicate whether the corresponding args are optional.
+  64
+  65    Example:
+  66        >>> class Foo(Expression):
+  67        ...     arg_types = {"this": True, "expression": False}
+  68
+  69        The above definition informs us that Foo is an Expression that requires an argument called
+  70        "this" and may also optionally receive an argument called "expression".
+  71
+  72    Args:
+  73        args: a mapping used for retrieving the arguments of an expression, given their arg keys.
+  74        parent: a reference to the parent expression (or None, in case of root expressions).
+  75        arg_key: the arg key an expression is associated with, i.e. the name its parent expression
+  76            uses to refer to it.
+  77        comments: a list of comments that are associated with a given expression. This is used in
+  78            order to preserve comments when transpiling SQL code.
+  79        _type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
+  80            optimizer, in order to enable some transformations that require type information.
+  81    """
+  82
+  83    key = "expression"
+  84    arg_types = {"this": True}
+  85    __slots__ = ("args", "parent", "arg_key", "comments", "_type")
+  86
+  87    def __init__(self, **args: t.Any):
+  88        self.args: t.Dict[str, t.Any] = args
+  89        self.parent: t.Optional[Expression] = None
+  90        self.arg_key: t.Optional[str] = None
+  91        self.comments: t.Optional[t.List[str]] = None
+  92        self._type: t.Optional[DataType] = None
+  93
+  94        for arg_key, value in self.args.items():
+  95            self._set_parent(arg_key, value)
+  96
+  97    def __eq__(self, other) -> bool:
+  98        return type(self) is type(other) and _norm_args(self) == _norm_args(other)
+  99
+ 100    def __hash__(self) -> int:
+ 101        return hash(
+ 102            (
+ 103                self.key,
+ 104                tuple(
+ 105                    (k, tuple(v) if isinstance(v, list) else v) for k, v in _norm_args(self).items()
+ 106                ),
+ 107            )
+ 108        )
+ 109
+ 110    @property
+ 111    def this(self):
+ 112        """
+ 113        Retrieves the argument with key "this".
+ 114        """
+ 115        return self.args.get("this")
+ 116
+ 117    @property
+ 118    def expression(self):
+ 119        """
+ 120        Retrieves the argument with key "expression".
+ 121        """
+ 122        return self.args.get("expression")
+ 123
+ 124    @property
+ 125    def expressions(self):
+ 126        """
+ 127        Retrieves the argument with key "expressions".
+ 128        """
+ 129        return self.args.get("expressions") or []
+ 130
+ 131    def text(self, key):
+ 132        """
+ 133        Returns a textual representation of the argument corresponding to "key". This can only be used
+ 134        for args that are strings or leaf Expression instances, such as identifiers and literals.
+ 135        """
+ 136        field = self.args.get(key)
+ 137        if isinstance(field, str):
+ 138            return field
+ 139        if isinstance(field, (Identifier, Literal, Var)):
+ 140            return field.this
+ 141        if isinstance(field, (Star, Null)):
+ 142            return field.name
+ 143        return ""
+ 144
+ 145    @property
+ 146    def is_string(self):
+ 147        """
+ 148        Checks whether a Literal expression is a string.
+ 149        """
+ 150        return isinstance(self, Literal) and self.args["is_string"]
+ 151
+ 152    @property
+ 153    def is_number(self):
+ 154        """
+ 155        Checks whether a Literal expression is a number.
+ 156        """
+ 157        return isinstance(self, Literal) and not self.args["is_string"]
+ 158
+ 159    @property
+ 160    def is_int(self):
+ 161        """
+ 162        Checks whether a Literal expression is an integer.
+ 163        """
+ 164        if self.is_number:
+ 165            try:
+ 166                int(self.name)
+ 167                return True
+ 168            except ValueError:
+ 169                pass
+ 170        return False
+ 171
+ 172    @property
+ 173    def alias(self):
+ 174        """
+ 175        Returns the alias of the expression, or an empty string if it's not aliased.
+ 176        """
+ 177        if isinstance(self.args.get("alias"), TableAlias):
+ 178            return self.args["alias"].name
+ 179        return self.text("alias")
+ 180
+ 181    @property
+ 182    def name(self) -> str:
+ 183        return self.text("this")
+ 184
+ 185    @property
+ 186    def alias_or_name(self):
+ 187        return self.alias or self.name
+ 188
    @property
    def output_name(self):
        """
        Name of the output column if this expression is a selection.

        If the Expression has no output name, an empty string is returned.

        Example:
            >>> from sqlglot import parse_one
            >>> parse_one("SELECT a").expressions[0].output_name
            'a'
            >>> parse_one("SELECT b AS c").expressions[0].output_name
            'c'
            >>> parse_one("SELECT 1 + 2").expressions[0].output_name
            ''

        The base implementation always returns ""; selectable subclasses in this
        module (e.g. Column, Identifier, Literal) override it to return their name.
        """
        return ""
+ 206
    @property
    def type(self) -> t.Optional[DataType]:
        # The type annotation attached to this node (set by e.g. the optimizer's
        # annotate_types pass — TODO confirm), or None when never annotated.
        return self._type

    @type.setter
    def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
        # Accepts a DataType, a DataType.Type, or a type string; anything truthy
        # that isn't already a DataType is coerced via DataType.build.
        # NOTE: falsy non-None values intentionally skip the build step.
        if dtype and not isinstance(dtype, DataType):
            dtype = DataType.build(dtype)
        self._type = dtype  # type: ignore
+ 216
+ 217    def __deepcopy__(self, memo):
+ 218        copy = self.__class__(**deepcopy(self.args))
+ 219        copy.comments = self.comments
+ 220        copy.type = self.type
+ 221        return copy
+ 222
+ 223    def copy(self):
+ 224        """
+ 225        Returns a deep copy of the expression.
+ 226        """
+ 227        new = deepcopy(self)
+ 228        new.parent = self.parent
+ 229        for item, parent, _ in new.bfs():
+ 230            if isinstance(item, Expression) and parent:
+ 231                item.parent = parent
+ 232        return new
+ 233
+ 234    def append(self, arg_key, value):
+ 235        """
+ 236        Appends value to arg_key if it's a list or sets it as a new list.
+ 237
+ 238        Args:
+ 239            arg_key (str): name of the list expression arg
+ 240            value (Any): value to append to the list
+ 241        """
+ 242        if not isinstance(self.args.get(arg_key), list):
+ 243            self.args[arg_key] = []
+ 244        self.args[arg_key].append(value)
+ 245        self._set_parent(arg_key, value)
+ 246
+ 247    def set(self, arg_key, value):
+ 248        """
+ 249        Sets `arg_key` to `value`.
+ 250
+ 251        Args:
+ 252            arg_key (str): name of the expression arg.
+ 253            value: value to set the arg to.
+ 254        """
+ 255        self.args[arg_key] = value
+ 256        self._set_parent(arg_key, value)
+ 257
+ 258    def _set_parent(self, arg_key, value):
+ 259        if isinstance(value, Expression):
+ 260            value.parent = self
+ 261            value.arg_key = arg_key
+ 262        elif isinstance(value, list):
+ 263            for v in value:
+ 264                if isinstance(v, Expression):
+ 265                    v.parent = self
+ 266                    v.arg_key = arg_key
+ 267
+ 268    @property
+ 269    def depth(self):
+ 270        """
+ 271        Returns the depth of this tree.
+ 272        """
+ 273        if self.parent:
+ 274            return self.parent.depth + 1
+ 275        return 0
+ 276
+ 277    def find(self, *expression_types, bfs=True):
+ 278        """
+ 279        Returns the first node in this tree which matches at least one of
+ 280        the specified types.
+ 281
+ 282        Args:
+ 283            expression_types (type): the expression type(s) to match.
+ 284
+ 285        Returns:
+ 286            The node which matches the criteria or None if no such node was found.
+ 287        """
+ 288        return next(self.find_all(*expression_types, bfs=bfs), None)
+ 289
+ 290    def find_all(self, *expression_types, bfs=True):
+ 291        """
+ 292        Returns a generator object which visits all nodes in this tree and only
+ 293        yields those that match at least one of the specified expression types.
+ 294
+ 295        Args:
+ 296            expression_types (type): the expression type(s) to match.
+ 297
+ 298        Returns:
+ 299            The generator object.
+ 300        """
+ 301        for expression, _, _ in self.walk(bfs=bfs):
+ 302            if isinstance(expression, expression_types):
+ 303                yield expression
+ 304
+ 305    def find_ancestor(self, *expression_types):
+ 306        """
+ 307        Returns a nearest parent matching expression_types.
+ 308
+ 309        Args:
+ 310            expression_types (type): the expression type(s) to match.
+ 311
+ 312        Returns:
+ 313            The parent node.
+ 314        """
+ 315        ancestor = self.parent
+ 316        while ancestor and not isinstance(ancestor, expression_types):
+ 317            ancestor = ancestor.parent
+ 318        return ancestor
+ 319
+ 320    @property
+ 321    def parent_select(self):
+ 322        """
+ 323        Returns the parent select statement.
+ 324        """
+ 325        return self.find_ancestor(Select)
+ 326
+ 327    def walk(self, bfs=True, prune=None):
+ 328        """
+ 329        Returns a generator object which visits all nodes in this tree.
+ 330
+ 331        Args:
+ 332            bfs (bool): if set to True the BFS traversal order will be applied,
+ 333                otherwise the DFS traversal will be used instead.
+ 334            prune ((node, parent, arg_key) -> bool): callable that returns True if
+ 335                the generator should stop traversing this branch of the tree.
+ 336
+ 337        Returns:
+ 338            the generator object.
+ 339        """
+ 340        if bfs:
+ 341            yield from self.bfs(prune=prune)
+ 342        else:
+ 343            yield from self.dfs(prune=prune)
+ 344
+ 345    def dfs(self, parent=None, key=None, prune=None):
+ 346        """
+ 347        Returns a generator object which visits all nodes in this tree in
+ 348        the DFS (Depth-first) order.
+ 349
+ 350        Returns:
+ 351            The generator object.
+ 352        """
+ 353        parent = parent or self.parent
+ 354        yield self, parent, key
+ 355        if prune and prune(self, parent, key):
+ 356            return
+ 357
+ 358        for k, v in self.args.items():
+ 359            for node in ensure_collection(v):
+ 360                if isinstance(node, Expression):
+ 361                    yield from node.dfs(self, k, prune)
+ 362
+ 363    def bfs(self, prune=None):
+ 364        """
+ 365        Returns a generator object which visits all nodes in this tree in
+ 366        the BFS (Breadth-first) order.
+ 367
+ 368        Returns:
+ 369            The generator object.
+ 370        """
+ 371        queue = deque([(self, self.parent, None)])
+ 372
+ 373        while queue:
+ 374            item, parent, key = queue.popleft()
+ 375
+ 376            yield item, parent, key
+ 377            if prune and prune(item, parent, key):
+ 378                continue
+ 379
+ 380            if isinstance(item, Expression):
+ 381                for k, v in item.args.items():
+ 382                    for node in ensure_collection(v):
+ 383                        if isinstance(node, Expression):
+ 384                            queue.append((node, item, k))
+ 385
+ 386    def unnest(self):
+ 387        """
+ 388        Returns the first non parenthesis child or self.
+ 389        """
+ 390        expression = self
+ 391        while isinstance(expression, Paren):
+ 392            expression = expression.this
+ 393        return expression
+ 394
+ 395    def unalias(self):
+ 396        """
+ 397        Returns the inner expression if this is an Alias.
+ 398        """
+ 399        if isinstance(self, Alias):
+ 400            return self.this
+ 401        return self
+ 402
+ 403    def unnest_operands(self):
+ 404        """
+ 405        Returns unnested operands as a tuple.
+ 406        """
+ 407        return tuple(arg.unnest() for arg in self.args.values() if arg)
+ 408
    def flatten(self, unnest=True):
        """
        Returns a generator which yields child nodes whose parents are the same class.

        A AND B AND C -> [A, B, C]
        """
        # Prune stops descending once a node of a *different* class is reached
        # (except at the root, where parent is falsy), so only the leaves of the
        # same-class chain are yielded.
        for node, _, _ in self.dfs(prune=lambda n, p, *_: p and not isinstance(n, self.__class__)):
            if not isinstance(node, self.__class__):
                yield node.unnest() if unnest else node
+ 418
    def __str__(self):
        """Render this expression as SQL in the default dialect."""
        return self.sql()

    def __repr__(self):
        """Render this expression's internal tree form (see `_to_s`)."""
        return self._to_s()
+ 424
+ 425    def sql(self, dialect: DialectType = None, **opts) -> str:
+ 426        """
+ 427        Returns SQL string representation of this tree.
+ 428
+ 429        Args:
+ 430            dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
+ 431            opts: other `sqlglot.generator.Generator` options.
+ 432
+ 433        Returns:
+ 434            The SQL string.
+ 435        """
+ 436        from sqlglot.dialects import Dialect
+ 437
+ 438        return Dialect.get_or_raise(dialect)().generate(self, **opts)
+ 439
    def _to_s(self, hide_missing: bool = True, level: int = 0) -> str:
        """Recursively pretty-print this node as '(KEY arg: value, ...)' with two-space nesting."""
        # Nested levels start on a fresh line, indented two spaces per level.
        indent = "" if not level else "\n"
        indent += "".join(["  "] * level)
        left = f"({self.key.upper()} "

        # Render each arg; list-valued args become a comma-joined string.
        args: t.Dict[str, t.Any] = {
            k: ", ".join(
                v._to_s(hide_missing=hide_missing, level=level + 1)
                if hasattr(v, "_to_s")
                else str(v)
                for v in ensure_collection(vs)
                if v is not None
            )
            for k, vs in self.args.items()
        }
        # Comments and the type annotation are shown alongside regular args.
        args["comments"] = self.comments
        args["type"] = self.type
        # By default, falsy/empty args are omitted from the output.
        args = {k: v for k, v in args.items() if v or not hide_missing}

        right = ", ".join(f"{k}: {v}" for k, v in args.items())
        right += ")"

        return indent + left + right
+ 463
    def transform(self, fun, *args, copy=True, **kwargs):
        """
        Recursively visits all tree nodes (excluding already transformed ones)
        and applies the given transformation function to each node.

        Args:
            fun (function): a function which takes a node as an argument and returns a
                new transformed node or the same node without modifications. If the function
                returns None, then the corresponding node will be removed from the syntax tree.
            copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
                modified in place.

        Returns:
            The transformed tree.
        """
        node = self.copy() if copy else self
        new_node = fun(node, *args, **kwargs)

        # If fun replaced the node (or removed it / returned a non-Expression),
        # the result is returned as-is and its subtree is NOT revisited.
        if new_node is None or not isinstance(new_node, Expression):
            return new_node
        if new_node is not node:
            new_node.parent = node.parent
            return new_node

        # Same node came back: recurse into the children (in place, since any
        # requested copy already happened at the top of this call).
        replace_children(new_node, lambda child: child.transform(fun, *args, copy=False, **kwargs))
        return new_node
+ 490
    def replace(self, expression):
        """
        Swap out this expression with a new expression.

        For example::

            >>> tree = Select().select("x").from_("tbl")
            >>> tree.find(Column).replace(Column(this="y"))
            (COLUMN this: y)
            >>> tree.sql()
            'SELECT y FROM tbl'

        Args:
            expression (Expression|None): new node

        Returns:
            The new expression or expressions.
        """
        # A root node has nothing to splice into; just hand back the replacement.
        if not self.parent:
            return expression

        parent = self.parent
        # Detach self before splicing so it no longer points into the tree.
        self.parent = None

        # replace_children rewrites parent's args, substituting `expression`
        # where this node used to be (None removes the slot entirely).
        replace_children(parent, lambda child: expression if child is self else child)
        return expression
+ 517
    def pop(self):
        """
        Remove this expression from its AST.

        Implemented as a replacement with None, which deletes the slot
        this node occupied in its parent (see `replace`).
        """
        self.replace(None)
+ 523
    def assert_is(self, type_):
        """
        Assert that this `Expression` is an instance of `type_`.

        If it is NOT an instance of `type_`, this raises an assertion error.
        Otherwise, this returns this expression.

        Examples:
            This is useful for type security in chained expressions:

            >>> import sqlglot
            >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
            'SELECT x, z FROM y'

        NOTE: uses a bare `assert`, so the check is skipped when Python runs
        with optimizations enabled (-O).
        """
        assert isinstance(self, type_)
        return self
+ 540
    def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
        """
        Checks if this expression is valid (e.g. all mandatory args are set).

        Args:
            args: a sequence of values that were used to instantiate a Func expression. This is used
                to check that the provided arguments don't exceed the function argument limit.

        Returns:
            A list of error messages for all possible errors that were found.
        """
        errors: t.List[str] = []

        # Args not declared in arg_types are unexpected.
        for k in self.args:
            if k not in self.arg_types:
                errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
        # Mandatory args must be present and, if lists, non-empty.
        for k, mandatory in self.arg_types.items():
            v = self.args.get(k)
            if mandatory and (v is None or (isinstance(v, list) and not v)):
                errors.append(f"Required keyword: '{k}' missing for {self.__class__}")

        # Fixed-arity functions may not receive more positional args than
        # their arg_types declare.
        if (
            args
            and isinstance(self, Func)
            and len(args) > len(self.arg_types)
            and not self.is_var_len_args
        ):
            errors.append(
                f"The number of provided arguments ({len(args)}) is greater than "
                f"the maximum number of supported arguments ({len(self.arg_types)})"
            )

        return errors
+ 574
    def dump(self):
        """
        Dump this Expression to a JSON-serializable dict.

        The inverse operation is the `load` classmethod.
        """
        # Imported lazily to avoid a circular import at module load time.
        from sqlglot.serde import dump

        return dump(self)
+ 582
    @classmethod
    def load(cls, obj):
        """
        Load a dict (as returned by `Expression.dump`) into an Expression instance.
        """
        # Imported lazily to avoid a circular import at module load time.
        from sqlglot.serde import load

        return load(obj)
+ 591
+ 592
# Type alias: an expression name, an Expression subclass, or a collection of
# either (presumably used for `into=...` parse targets — confirm at call sites).
IntoType = t.Union[
    str,
    t.Type[Expression],
    t.Collection[t.Union[str, t.Type[Expression]]],
]
+ 598
+ 599
class Condition(Expression):
    """
    Base class for expressions that can participate in boolean logic
    (e.g. Column, Literal, and the constraint/predicate nodes below),
    providing fluent AND/OR/NOT combinators.
    """

    def and_(self, *expressions, dialect=None, **opts):
        """
        AND this condition with one or multiple expressions.

        Example:
            >>> condition("x=1").and_("y=1").sql()
            'x = 1 AND y = 1'

        Args:
            *expressions (str | Expression): the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
            dialect (str): the dialect used to parse the input expression.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            And: the new condition.
        """
        return and_(self, *expressions, dialect=dialect, **opts)

    def or_(self, *expressions, dialect=None, **opts):
        """
        OR this condition with one or multiple expressions.

        Example:
            >>> condition("x=1").or_("y=1").sql()
            'x = 1 OR y = 1'

        Args:
            *expressions (str | Expression): the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
            dialect (str): the dialect used to parse the input expression.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Or: the new condition.
        """
        return or_(self, *expressions, dialect=dialect, **opts)

    def not_(self):
        """
        Wrap this condition with NOT.

        Example:
            >>> condition("x=1").not_().sql()
            'NOT x = 1'

        Returns:
            Not: the new condition.
        """
        return not_(self)
+ 651
+ 652
class Predicate(Condition):
    """Relationships like x = y, x > 1, x >= y — conditions comparing two operands."""
+ 655
+ 656
class DerivedTable(Expression):
    """Base class for table-producing expressions (see subclasses CTE and UDTF)."""

    @property
    def alias_column_names(self):
        """Names of the columns declared in this table's alias, or []."""
        table_alias = self.args.get("alias")
        if not table_alias:
            return []
        columns = table_alias.assert_is(TableAlias).args.get("columns") or []
        return [column.name for column in columns]

    @property
    def selects(self):
        """The alias's column expressions, or [] when unaliased."""
        alias = self.args.get("alias")
        return alias.columns if alias else []

    @property
    def named_selects(self):
        """Output names of this table's selected columns."""
        return [column.output_name for column in self.selects]
+ 677
+ 678
class Unionable(Expression):
    """Mixin for expressions that support set operations (UNION/INTERSECT/EXCEPT)."""

    def union(self, expression, distinct=True, dialect=None, **opts):
        """
        Builds a UNION expression.

        Example:
            >>> import sqlglot
            >>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()
            'SELECT * FROM foo UNION SELECT * FROM bla'

        Args:
            expression (str | Expression): the SQL code string.
                If an `Expression` instance is passed, it will be used as-is.
            distinct (bool): set the DISTINCT flag if and only if this is true.
            dialect (str): the dialect used to parse the input expression.
            opts (kwargs): other options to use to parse the input expressions.
        Returns:
            Union: the Union expression.
        """
        return union(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)

    def intersect(self, expression, distinct=True, dialect=None, **opts):
        """
        Builds an INTERSECT expression.

        Example:
            >>> import sqlglot
            >>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()
            'SELECT * FROM foo INTERSECT SELECT * FROM bla'

        Args:
            expression (str | Expression): the SQL code string.
                If an `Expression` instance is passed, it will be used as-is.
            distinct (bool): set the DISTINCT flag if and only if this is true.
            dialect (str): the dialect used to parse the input expression.
            opts (kwargs): other options to use to parse the input expressions.
        Returns:
            Intersect: the Intersect expression
        """
        return intersect(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)

    def except_(self, expression, distinct=True, dialect=None, **opts):
        """
        Builds an EXCEPT expression.

        Example:
            >>> import sqlglot
            >>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()
            'SELECT * FROM foo EXCEPT SELECT * FROM bla'

        Args:
            expression (str | Expression): the SQL code string.
                If an `Expression` instance is passed, it will be used as-is.
            distinct (bool): set the DISTINCT flag if and only if this is true.
            dialect (str): the dialect used to parse the input expression.
            opts (kwargs): other options to use to parse the input expressions.
        Returns:
            Except: the Except expression
        """
        return except_(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+ 739
+ 740
class UDTF(DerivedTable, Unionable):
    """A user-defined table function: a derived table that also supports set operations."""

    pass
+ 743
+ 744
class Cache(Expression):
    """AST node for a CACHE TABLE statement."""

    arg_types = {
        "with": False,
        "this": True,
        "lazy": False,
        "options": False,
        "expression": False,
    }
+ 753
+ 754
class Uncache(Expression):
    """AST node for an UNCACHE TABLE statement."""

    arg_types = {"this": True, "exists": False}
+ 757
+ 758
class Create(Expression):
    """
    AST node for CREATE statements.

    `kind` holds what is being created (e.g. table, view); the remaining
    flags model dialect-specific modifiers (TEMPORARY, EXTERNAL, OR REPLACE, ...).
    """

    arg_types = {
        "with": False,
        "this": True,
        "kind": True,
        "expression": False,
        "set": False,
        "multiset": False,
        "global_temporary": False,
        "volatile": False,
        "exists": False,
        "properties": False,
        "temporary": False,
        "transient": False,
        "external": False,
        "replace": False,
        "unique": False,
        "materialized": False,
        "data": False,
        "statistics": False,
        "no_primary_index": False,
        "indexes": False,
        "no_schema_binding": False,
        "begin": False,
    }
+ 784
+ 785
class Describe(Expression):
    """AST node for a DESCRIBE statement."""

    arg_types = {"this": True, "kind": False}
+ 788
+ 789
class Set(Expression):
    """AST node for a SET statement holding one or more SetItem expressions."""

    arg_types = {"expressions": True}
+ 792
+ 793
class SetItem(Expression):
    """A single assignment/option inside a SET statement."""

    arg_types = {
        "this": False,
        "expressions": False,
        "kind": False,
        "collate": False,  # MySQL SET NAMES statement
        "global": False,
    }
+ 802
+ 803
class Show(Expression):
    """AST node for a SHOW statement and its many dialect-specific modifiers."""

    arg_types = {
        "this": True,
        "target": False,
        "offset": False,
        "limit": False,
        "like": False,
        "where": False,
        "db": False,
        "full": False,
        "mutex": False,
        "query": False,
        "channel": False,
        "global": False,
        "log": False,
        "position": False,
        "types": False,
    }
+ 822
+ 823
class UserDefinedFunction(Expression):
    """A user-defined function signature (name plus argument expressions)."""

    arg_types = {"this": True, "expressions": False, "wrapped": False}
+ 826
+ 827
class UserDefinedFunctionKwarg(Expression):
    """A named, typed parameter of a user-defined function, with optional default."""

    arg_types = {"this": True, "kind": True, "default": False}
+ 830
+ 831
class CharacterSet(Expression):
    """AST node for a character-set specification."""

    arg_types = {"this": True, "default": False}
+ 834
+ 835
class With(Expression):
    """A WITH clause holding a list of CTE expressions."""

    arg_types = {"expressions": True, "recursive": False}

    @property
    def recursive(self) -> bool:
        # True for WITH RECURSIVE.
        return bool(self.args.get("recursive"))
+ 842
+ 843
class WithinGroup(Expression):
    """AST node for a WITHIN GROUP (ORDER BY ...) clause."""

    arg_types = {"this": True, "expression": False}
+ 846
+ 847
class CTE(DerivedTable):
    """A common table expression: an aliased query inside a WITH clause."""

    arg_types = {"this": True, "alias": True}
+ 850
+ 851
class TableAlias(Expression):
    """An alias for a table-like expression, optionally with column aliases."""

    arg_types = {"this": False, "columns": False}

    @property
    def columns(self):
        # Declared column aliases, or an empty list when none were given.
        return self.args.get("columns") or []
+ 858
+ 859
class BitString(Condition):
    """A bit-string literal node."""

    pass
+ 862
+ 863
class HexString(Condition):
    """A hexadecimal string literal node."""

    pass
+ 866
+ 867
class ByteString(Condition):
    """A byte string literal node."""

    pass
+ 870
+ 871
class Column(Condition):
    """A column reference, optionally qualified by a table."""

    arg_types = {"this": True, "table": False}

    @property
    def table(self):
        # The qualifying table name as text ("" when unqualified).
        return self.text("table")

    @property
    def output_name(self):
        # A selected column's output name is simply its own name.
        return self.name
+ 882
+ 883
class ColumnDef(Expression):
    """A column definition: name, data type (`kind`), and constraints."""

    arg_types = {
        "this": True,
        "kind": False,
        "constraints": False,
        "exists": False,
    }
+ 891
+ 892
class AlterColumn(Expression):
    """AST node for an ALTER COLUMN action within an ALTER TABLE statement."""

    arg_types = {
        "this": True,
        "dtype": False,
        "collate": False,
        "using": False,
        "default": False,
        "drop": False,
    }
+ 902
+ 903
class RenameTable(Expression):
    """AST node for a RENAME (table) action."""

    pass
+ 906
+ 907
class ColumnConstraint(Expression):
    """A (possibly named) column constraint; `kind` is a ColumnConstraintKind."""

    arg_types = {"this": False, "kind": True}
+ 910
+ 911
class ColumnConstraintKind(Expression):
    """Base class for the concrete column-constraint kinds below."""

    pass
+ 914
+ 915
class AutoIncrementColumnConstraint(ColumnConstraintKind):
    """AUTO_INCREMENT column constraint."""

    pass
+ 918
+ 919
class CheckColumnConstraint(ColumnConstraintKind):
    """CHECK column constraint."""

    pass
+ 922
+ 923
class CollateColumnConstraint(ColumnConstraintKind):
    """COLLATE column constraint."""

    pass
+ 926
+ 927
class CommentColumnConstraint(ColumnConstraintKind):
    """COMMENT column constraint."""

    pass
+ 930
+ 931
class DefaultColumnConstraint(ColumnConstraintKind):
    """DEFAULT column constraint."""

    pass
+ 934
+ 935
class EncodeColumnConstraint(ColumnConstraintKind):
    """ENCODE column constraint."""

    pass
+ 938
+ 939
class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
    """GENERATED ... AS IDENTITY column constraint."""

    # this: True -> ALWAYS, this: False -> BY DEFAULT
    arg_types = {"this": False, "start": False, "increment": False}
+ 943
+ 944
class NotNullColumnConstraint(ColumnConstraintKind):
    """NOT NULL (or, when `allow_null` is set, NULL) column constraint."""

    arg_types = {"allow_null": False}
+ 947
+ 948
class PrimaryKeyColumnConstraint(ColumnConstraintKind):
    """PRIMARY KEY column constraint, optionally DESC."""

    arg_types = {"desc": False}
+ 951
+ 952
class UniqueColumnConstraint(ColumnConstraintKind):
    """UNIQUE column constraint."""

    pass
+ 955
+ 956
class Constraint(Expression):
    """A named table constraint containing one or more constraint expressions."""

    arg_types = {"this": True, "expressions": True}
+ 959
+ 960
class Delete(Expression):
    """AST node for a DELETE statement."""

    arg_types = {"with": False, "this": False, "using": False, "where": False}
+ 963
+ 964
class Drop(Expression):
    """AST node for a DROP statement; `kind` is what is being dropped."""

    arg_types = {
        "this": False,
        "kind": False,
        "exists": False,
        "temporary": False,
        "materialized": False,
        "cascade": False,
    }
+ 974
+ 975
class Filter(Expression):
    """AST node for a FILTER clause applied to an expression."""

    arg_types = {"this": True, "expression": True}
+ 978
+ 979
class Check(Expression):
    """AST node for a CHECK clause."""

    pass
+ 982
+ 983
class Directory(Expression):
    """Target directory of an INSERT OVERWRITE [LOCAL] DIRECTORY statement."""

    # https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-dml-insert-overwrite-directory-hive.html
    arg_types = {"this": True, "local": False, "row_format": False}
+ 987
+ 988
class ForeignKey(Expression):
    """FOREIGN KEY constraint with its referenced target and ON DELETE/UPDATE actions."""

    arg_types = {
        "expressions": True,
        "reference": False,
        "delete": False,
        "update": False,
    }
+ 996
+ 997
class PrimaryKey(Expression):
    """PRIMARY KEY table constraint over one or more columns."""

    arg_types = {"expressions": True, "options": False}
+1000
+1001
class Unique(Expression):
    """UNIQUE table constraint over one or more columns."""

    arg_types = {"expressions": True}
+1004
+1005
# https://www.postgresql.org/docs/9.1/sql-selectinto.html
# https://docs.aws.amazon.com/redshift/latest/dg/r_SELECT_INTO.html#r_SELECT_INTO-examples
class Into(Expression):
    """SELECT ... INTO target clause."""

    arg_types = {"this": True, "temporary": False, "unlogged": False}
+1010
+1011
class From(Expression):
    """FROM clause holding one or more table expressions."""

    arg_types = {"expressions": True}
+1014
+1015
class Having(Expression):
    """HAVING clause."""

    pass
+1018
+1019
class Hint(Expression):
    """A query hint holding one or more hint expressions."""

    arg_types = {"expressions": True}
+1022
+1023
class JoinHint(Expression):
    """A join hint (name plus its argument expressions)."""

    arg_types = {"this": True, "expressions": True}
+1026
+1027
class Identifier(Expression):
    """A (possibly quoted) identifier such as a table or column name."""

    arg_types = {"this": True, "quoted": False}

    @property
    def quoted(self):
        # True when the identifier was written with quoting.
        return bool(self.args.get("quoted"))

    def __eq__(self, other):
        # Equality compares the identifier text normalized via _norm_arg
        # (defined elsewhere in this module).
        return isinstance(other, self.__class__) and _norm_arg(self.this) == _norm_arg(other.this)

    def __hash__(self):
        # Lowercased so hashing is consistent with the normalized __eq__.
        return hash((self.key, self.this.lower()))

    @property
    def output_name(self):
        # A selected identifier's output name is simply its own name.
        return self.name
+1044
+1045
class Index(Expression):
    """AST node for an index definition/reference."""

    arg_types = {
        "this": False,
        "table": False,
        "where": False,
        "columns": False,
        "unique": False,
        "primary": False,
        "amp": False,  # teradata
    }
+1056
+1057
class Insert(Expression):
    """AST node for an INSERT [OVERWRITE] statement."""

    arg_types = {
        "with": False,
        "this": True,
        "expression": False,
        "overwrite": False,
        "exists": False,
        "partition": False,
    }
+1067
+1068
# https://dev.mysql.com/doc/refman/8.0/en/charset-introducer.html
class Introducer(Expression):
    """A character-set introducer preceding a string literal (MySQL)."""

    arg_types = {"this": True, "expression": True}
+1072
+1073
# national char, like n'utf8'
class National(Expression):
    """A national character string literal (e.g. n'utf8')."""

    pass
+1077
+1078
class LoadData(Expression):
    """AST node for a LOAD DATA [LOCAL] INPATH statement."""

    arg_types = {
        "this": True,
        "local": False,
        "overwrite": False,
        "inpath": True,
        "partition": False,
        "input_format": False,
        "serde": False,
    }
+1089
+1090
class Partition(Expression):
    """A PARTITION clause holding partition expressions."""

    arg_types = {"expressions": True}
+1093
+1094
class Fetch(Expression):
    """A FETCH clause (direction and row count)."""

    arg_types = {"direction": False, "count": False}
+1097
+1098
class Group(Expression):
    """A GROUP BY clause, including GROUPING SETS / CUBE / ROLLUP variants."""

    arg_types = {
        "expressions": False,
        "grouping_sets": False,
        "cube": False,
        "rollup": False,
    }
+1106
+1107
class Lambda(Expression):
    """A lambda expression: a body (`this`) over parameter expressions."""

    arg_types = {"this": True, "expressions": True}
+1110
+1111
class Limit(Expression):
    """A LIMIT clause."""

    arg_types = {"this": False, "expression": True}
+1114
+1115
class Literal(Condition):
    """A literal value; `is_string` distinguishes string from numeric literals."""

    arg_types = {"this": True, "is_string": True}

    def __eq__(self, other):
        # Two literals are equal when both text and string-ness match.
        return (
            isinstance(other, Literal)
            and self.this == other.this
            and self.args["is_string"] == other.args["is_string"]
        )

    def __hash__(self):
        # Consistent with __eq__: keyed on node key, text, and string-ness.
        return hash((self.key, self.this, self.args["is_string"]))

    @classmethod
    def number(cls, number) -> Literal:
        """Build a numeric literal from any value convertible via str()."""
        return cls(this=str(number), is_string=False)

    @classmethod
    def string(cls, string) -> Literal:
        """Build a string literal from any value convertible via str()."""
        return cls(this=str(string), is_string=True)

    @property
    def output_name(self):
        # A selected literal's output name is its own textual value.
        return self.name
+1140
+1141
class Join(Expression):
    arg_types = {
        "this": True,
        "on": False,
        "side": False,
        "kind": False,
        "using": False,
        "natural": False,
    }

    @property
    def kind(self):
        # Upper-cased "kind" text (e.g. "CROSS").
        return self.text("kind").upper()

    @property
    def side(self):
        # Upper-cased "side" text (e.g. "LEFT").
        return self.text("side").upper()

    @property
    def alias_or_name(self):
        return self.this.alias_or_name

    @staticmethod
    def _normalize(join):
        # Attaching an ON/USING clause turns an explicit CROSS join into a plain join.
        if join.kind == "CROSS":
            join.set("kind", None)
        return join

    def on(self, *expressions, append=True, dialect=None, copy=True, **opts):
        """
        Append to or set the ON expressions.

        Example:
            >>> import sqlglot
            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
            'JOIN x ON y = 1'

        Args:
            *expressions (str | Expression): the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
                Multiple expressions are combined with an AND operator.
            append (bool): if `True`, AND the new expressions to any existing expression.
                Otherwise, this resets the expression.
            dialect (str): the dialect used to parse the input expressions.
            copy (bool): if `False`, modify this expression instance in-place.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Join: the modified join expression.
        """
        node = _apply_conjunction_builder(
            *expressions,
            instance=self,
            arg="on",
            append=append,
            dialect=dialect,
            copy=copy,
            **opts,
        )
        return self._normalize(node)

    def using(self, *expressions, append=True, dialect=None, copy=True, **opts):
        """
        Append to or set the USING expressions.

        Example:
            >>> import sqlglot
            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
            'JOIN x USING (foo, bla)'

        Args:
            *expressions (str | Expression): the SQL code strings to parse.
                If an `Expression` instance is passed, it will be used as-is.
            append (bool): if `True`, concatenate the new expressions to the existing "using" list.
                Otherwise, this resets the expression.
            dialect (str): the dialect used to parse the input expressions.
            copy (bool): if `False`, modify this expression instance in-place.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Join: the modified join expression.
        """
        node = _apply_list_builder(
            *expressions,
            instance=self,
            arg="using",
            append=append,
            dialect=dialect,
            copy=copy,
            **opts,
        )
        return self._normalize(node)
+1236
+1237
class Lateral(UDTF):
    # LATERAL [VIEW] construct (built e.g. by Select.lateral).
    arg_types = {"this": True, "view": False, "outer": False, "alias": False}
+1240
+1241
class MatchRecognize(Expression):
    # MATCH_RECOGNIZE clause (all sub-clauses optional).
    arg_types = {
        "partition_by": False,
        "order": False,
        "measures": False,
        "rows": False,
        "after": False,
        "pattern": False,
        "define": False,
    }
+1252
+1253
# Clickhouse FROM FINAL modifier
# https://clickhouse.com/docs/en/sql-reference/statements/select/from/#final-modifier
class Final(Expression):
    # Takes no arguments; the node's presence alone marks FINAL.
    pass
+1258
+1259
class Offset(Expression):
    # OFFSET clause.
    arg_types = {"this": False, "expression": True}
+1262
+1263
class Order(Expression):
    # ORDER BY clause.
    arg_types = {"this": False, "expressions": True}
+1266
+1267
# hive specific sorts
# https://cwiki.apache.org/confluence/display/Hive/LanguageManual+SortBy
class Cluster(Order):
    # CLUSTER BY clause; shares Order's shape.
    pass
+1272
+1273
class Distribute(Order):
    # DISTRIBUTE BY clause (Hive); shares Order's shape.
    pass
+1276
+1277
class Sort(Order):
    # SORT BY clause (Hive); shares Order's shape.
    pass
+1280
+1281
class Ordered(Expression):
    # A single ordering term: the expression plus its DESC and NULLS FIRST flags.
    arg_types = {"this": True, "desc": True, "nulls_first": True}
+1284
+1285
class Property(Expression):
    # Generic name/value property; see Properties.from_dict for construction from plain dicts.
    arg_types = {"this": True, "value": True}
+1288
+1289
class AlgorithmProperty(Property):
    # ALGORITHM property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1292
+1293
class DefinerProperty(Property):
    # DEFINER property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1296
+1297
class SqlSecurityProperty(Property):
    # SQL SECURITY property; carries only the "definer" flag.
    arg_types = {"definer": True}
+1300
+1301
class TableFormatProperty(Property):
    # TABLE_FORMAT property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1304
+1305
class PartitionedByProperty(Property):
    # PARTITIONED_BY property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1308
+1309
class FileFormatProperty(Property):
    # FORMAT property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1312
+1313
class DistKeyProperty(Property):
    # DISTKEY property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1316
+1317
class SortKeyProperty(Property):
    # SORTKEY property; "compound" flags the compound variant.
    arg_types = {"this": True, "compound": False}
+1320
+1321
class DistStyleProperty(Property):
    # DISTSTYLE property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1324
+1325
class LikeProperty(Property):
    # LIKE property with optional option expressions.
    arg_types = {"this": True, "expressions": False}
+1328
+1329
class LocationProperty(Property):
    # LOCATION property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1332
+1333
class EngineProperty(Property):
    # ENGINE property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1336
+1337
class AutoIncrementProperty(Property):
    # AUTO_INCREMENT property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1340
+1341
class CharacterSetProperty(Property):
    # CHARACTER SET property; "default" flag is required.
    arg_types = {"this": True, "default": True}
+1344
+1345
class CollateProperty(Property):
    # COLLATE property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1348
+1349
class SchemaCommentProperty(Property):
    # COMMENT property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1352
+1353
class ReturnsProperty(Property):
    # RETURNS property; supports scalar and table forms ("is_table"/"table").
    arg_types = {"this": True, "is_table": False, "table": False}
+1356
+1357
class LanguageProperty(Property):
    # LANGUAGE property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1360
+1361
class ExecuteAsProperty(Property):
    # EXECUTE AS property (see Properties.NAME_TO_PROPERTY).
    arg_types = {"this": True}
+1364
+1365
class VolatilityProperty(Property):
    # Volatility property (e.g. IMMUTABLE/STABLE/VOLATILE — confirm against parser).
    arg_types = {"this": True}
+1368
+1369
class RowFormatDelimitedProperty(Property):
    # Hive ROW FORMAT DELIMITED options.
    # https://cwiki.apache.org/confluence/display/hive/languagemanual+dml
    arg_types = {
        "fields": False,
        "escaped": False,
        "collection_items": False,
        "map_keys": False,
        "lines": False,
        "null": False,
        "serde": False,
    }
+1381
+1382
class RowFormatSerdeProperty(Property):
    # ROW FORMAT SERDE property.
    arg_types = {"this": True}
+1385
+1386
class SerdeProperties(Property):
    # SERDEPROPERTIES list.
    arg_types = {"expressions": True}
+1389
+1390
class FallbackProperty(Property):
    # FALLBACK property; "no" and "protection" are flags.
    arg_types = {"no": True, "protection": False}
+1393
+1394
class WithJournalTableProperty(Property):
    # WITH JOURNAL TABLE property.
    arg_types = {"this": True}
+1397
+1398
class LogProperty(Property):
    # [NO] LOG property.
    arg_types = {"no": True}
+1401
+1402
class JournalProperty(Property):
    # [NO] [DUAL] [BEFORE] JOURNAL property flags.
    arg_types = {"no": True, "dual": False, "before": False}
+1405
+1406
class AfterJournalProperty(Property):
    # AFTER JOURNAL property flags ("no"/"dual"/"local").
    arg_types = {"no": True, "dual": False, "local": False}
+1409
+1410
class ChecksumProperty(Property):
    # CHECKSUM property: "on" flag or "default".
    arg_types = {"on": False, "default": False}
+1413
+1414
class FreespaceProperty(Property):
    # FREESPACE property with optional PERCENT flag.
    arg_types = {"this": True, "percent": False}
+1417
+1418
class MergeBlockRatioProperty(Property):
    # MERGEBLOCKRATIO property; all arguments optional.
    arg_types = {"this": False, "no": False, "default": False, "percent": False}
+1421
+1422
class DataBlocksizeProperty(Property):
    # DATABLOCKSIZE property; all arguments optional.
    arg_types = {"size": False, "units": False, "min": False, "default": False}
+1425
+1426
class BlockCompressionProperty(Property):
    # BLOCKCOMPRESSION property.
    # NOTE(review): "default"/"manual"/"never" are all marked required (True), unlike the
    # sibling Teradata-style properties above — confirm against the parser call site.
    arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True}
+1429
+1430
class IsolatedLoadingProperty(Property):
    # [NO | CONCURRENT] ISOLATED LOADING property flags.
    arg_types = {
        "no": True,
        "concurrent": True,
        "for_all": True,
        "for_insert": True,
        "for_none": True,
    }
+1439
+1440
class Properties(Expression):
    """Container for a list of Property nodes."""

    arg_types = {"expressions": True}

    # Upper-cased property keyword -> dedicated Property subclass.
    NAME_TO_PROPERTY = {
        "ALGORITHM": AlgorithmProperty,
        "AUTO_INCREMENT": AutoIncrementProperty,
        "CHARACTER SET": CharacterSetProperty,
        "COLLATE": CollateProperty,
        "COMMENT": SchemaCommentProperty,
        "DEFINER": DefinerProperty,
        "DISTKEY": DistKeyProperty,
        "DISTSTYLE": DistStyleProperty,
        "ENGINE": EngineProperty,
        "EXECUTE AS": ExecuteAsProperty,
        "FORMAT": FileFormatProperty,
        "LANGUAGE": LanguageProperty,
        "LOCATION": LocationProperty,
        "PARTITIONED_BY": PartitionedByProperty,
        "RETURNS": ReturnsProperty,
        "SORTKEY": SortKeyProperty,
        "TABLE_FORMAT": TableFormatProperty,
    }

    # Inverse mapping: Property subclass -> keyword.
    PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()}

    class Location(AutoName):
        # Where a property may be rendered relative to the CREATE statement.
        POST_CREATE = auto()
        PRE_SCHEMA = auto()
        POST_INDEX = auto()
        POST_SCHEMA_ROOT = auto()
        POST_SCHEMA_WITH = auto()
        UNSUPPORTED = auto()

    @classmethod
    def from_dict(cls, properties_dict) -> Properties:
        """Build a Properties node from a plain ``{name: value}`` mapping.

        Names found in NAME_TO_PROPERTY (case-insensitively) become their
        dedicated subclass; anything else becomes a generic Property.
        """
        expressions = []
        for name, value in properties_dict.items():
            klass = cls.NAME_TO_PROPERTY.get(name.upper())
            if klass is None:
                expressions.append(Property(this=Literal.string(name), value=convert(value)))
            else:
                expressions.append(klass(this=convert(value)))

        return cls(expressions=expressions)
+1485
+1486
class Qualify(Expression):
    # QUALIFY clause.
    pass
+1489
+1490
# https://www.ibm.com/docs/en/ias?topic=procedures-return-statement-in-sql
class Return(Expression):
    # RETURN statement inside SQL procedures.
    pass
+1494
+1495
class Reference(Expression):
    # Reference expression: target plus optional expressions and options.
    arg_types = {"this": True, "expressions": False, "options": False}
+1498
+1499
class Tuple(Expression):
    # Tuple of expressions (possibly empty).
    arg_types = {"expressions": False}
+1502
+1503
class Subqueryable(Unionable):
    """Base class for expressions that can be wrapped as a subquery."""

    def subquery(self, alias=None, copy=True) -> Subquery:
        """
        Convert this expression to an aliased expression that can be used as a Subquery.

        Example:
            >>> subquery = Select().select("x").from_("tbl").subquery()
            >>> Select().select("x").from_(subquery).sql()
            'SELECT x FROM (SELECT x FROM tbl)'

        Args:
            alias (str | Identifier): an optional alias for the subquery
            copy (bool): if `False`, modify this expression instance in-place.

        Returns:
            Alias: the subquery
        """
        inner = _maybe_copy(self, copy)
        return Subquery(this=inner, alias=TableAlias(this=to_identifier(alias)))

    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
        # Concrete subclasses (Select, Union) provide their own LIMIT handling.
        raise NotImplementedError

    @property
    def ctes(self):
        # CTEs attached via a WITH clause; empty list when there is none.
        with_clause = self.args.get("with")
        return with_clause.expressions if with_clause else []

    @property
    def selects(self):
        raise NotImplementedError("Subqueryable objects must implement `selects`")

    @property
    def named_selects(self):
        raise NotImplementedError("Subqueryable objects must implement `named_selects`")

    def with_(
        self,
        alias,
        as_,
        recursive=None,
        append=True,
        dialect=None,
        copy=True,
        **opts,
    ):
        """
        Append to or set the common table expressions.

        Example:
            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'

        Args:
            alias (str | Expression): the SQL code string to parse as the table name.
                If an `Expression` instance is passed, this is used as-is.
            as_ (str | Expression): the SQL code string to parse as the table expression.
                If an `Expression` instance is passed, it will be used as-is.
            recursive (bool): set the RECURSIVE part of the expression. Defaults to `False`.
            append (bool): if `True`, add to any existing expressions.
                Otherwise, this resets the expressions.
            dialect (str): the dialect used to parse the input expression.
            copy (bool): if `False`, modify this expression instance in-place.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Select: the modified expression.
        """
        parsed_alias = maybe_parse(alias, dialect=dialect, into=TableAlias, **opts)
        parsed_body = maybe_parse(as_, dialect=dialect, **opts)

        return _apply_child_list_builder(
            CTE(this=parsed_body, alias=parsed_alias),
            instance=self,
            arg="with",
            append=append,
            copy=copy,
            into=With,
            properties={"recursive": recursive or False},
        )
+1601
+1602
# Optional modifier arguments shared by query-like nodes (splatted into the
# arg_types of Select and Union below); every one is optional.
QUERY_MODIFIERS = dict.fromkeys(
    (
        "match",
        "laterals",
        "joins",
        "pivots",
        "where",
        "group",
        "having",
        "qualify",
        "windows",
        "distribute",
        "sort",
        "cluster",
        "order",
        "limit",
        "offset",
        "lock",
    ),
    False,
)
+1621
+1622
class Table(Expression):
    # A table reference with optional alias, db/catalog qualifiers,
    # attached laterals/joins/pivots, hints, and a SYSTEM_TIME clause.
    arg_types = {
        "this": True,
        "alias": False,
        "db": False,
        "catalog": False,
        "laterals": False,
        "joins": False,
        "pivots": False,
        "hints": False,
        "system_time": False,
    }
+1635
+1636
# See the TSQL "Querying data in a system-versioned temporal table" page
class SystemTime(Expression):
    # FOR SYSTEM_TIME clause arguments; "kind" selects the variant.
    arg_types = {
        "this": False,
        "expression": False,
        "kind": True,
    }
+1644
+1645
class Union(Subqueryable):
    """UNION of two queries ("this" and "expression")."""

    arg_types = {
        "with": False,
        "this": True,
        "expression": True,
        "distinct": False,
        **QUERY_MODIFIERS,
    }

    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
        """
        Set the LIMIT expression.

        Example:
            >>> select("1").union(select("1")).limit(1).sql()
            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'

        Args:
            expression (str | int | Expression): the SQL code string to parse.
                This can also be an integer.
                If a `Limit` instance is passed, this is used as-is.
                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
            dialect (str): the dialect used to parse the input expression.
            copy (bool): if `False`, modify this expression instance in-place.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Select: The limited subqueryable.
        """
        # A UNION cannot carry its own LIMIT here, so wrap it in a subquery first.
        wrapped = self.subquery(alias="_l_0", copy=copy)
        return select("*").from_(wrapped).limit(expression, dialect=dialect, copy=False, **opts)

    @property
    def named_selects(self):
        # Delegate to the left-hand query.
        return self.this.unnest().named_selects

    @property
    def selects(self):
        # Delegate to the left-hand query.
        return self.this.unnest().selects

    @property
    def left(self):
        return self.this

    @property
    def right(self):
        return self.expression
+1696
+1697
class Except(Union):
    # EXCEPT set operation; same shape as Union.
    pass
+1700
+1701
class Intersect(Union):
    # INTERSECT set operation; same shape as Union.
    pass
+1704
+1705
class Unnest(UDTF):
    # UNNEST table function.
    arg_types = {
        "expressions": True,
        "ordinality": False,
        "alias": False,
        "offset": False,
    }
+1713
+1714
class Update(Expression):
    # UPDATE statement ("expressions" are the SET assignments).
    arg_types = {
        "with": False,
        "this": False,
        "expressions": True,
        "from": False,
        "where": False,
    }
+1723
+1724
class Values(UDTF):
    # VALUES expression (rows of literals).
    arg_types = {
        "expressions": True,
        "ordinality": False,
        "alias": False,
    }
+1731
+1732
class Var(Expression):
    # A bare variable / keyword token treated as an expression.
    pass
+1735
+1736
class Schema(Expression):
    # Schema definition: optional name ("this") plus column/definition expressions.
    arg_types = {"this": False, "expressions": False}
+1739
+1740
# Used to represent the FOR UPDATE and FOR SHARE locking read types.
# https://dev.mysql.com/doc/refman/8.0/en/innodb-locking-reads.html
class Lock(Expression):
    # "update" flag — presumably True for FOR UPDATE, False for FOR SHARE; confirm
    # against the generator's lock handling.
    arg_types = {"update": True}
+1745
+1746
+1747class Select(Subqueryable):
+1748    arg_types = {
+1749        "with": False,
+1750        "expressions": False,
+1751        "hint": False,
+1752        "distinct": False,
+1753        "into": False,
+1754        "from": False,
+1755        **QUERY_MODIFIERS,
+1756    }
+1757
+1758    def from_(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1759        """
+1760        Set the FROM expression.
+1761
+1762        Example:
+1763            >>> Select().from_("tbl").select("x").sql()
+1764            'SELECT x FROM tbl'
+1765
+1766        Args:
+1767            *expressions (str | Expression): the SQL code strings to parse.
+1768                If a `From` instance is passed, this is used as-is.
+1769                If another `Expression` instance is passed, it will be wrapped in a `From`.
+1770            append (bool): if `True`, add to any existing expressions.
+1771                Otherwise, this flattens all the `From` expression into a single expression.
+1772            dialect (str): the dialect used to parse the input expression.
+1773            copy (bool): if `False`, modify this expression instance in-place.
+1774            opts (kwargs): other options to use to parse the input expressions.
+1775
+1776        Returns:
+1777            Select: the modified expression.
+1778        """
+1779        return _apply_child_list_builder(
+1780            *expressions,
+1781            instance=self,
+1782            arg="from",
+1783            append=append,
+1784            copy=copy,
+1785            prefix="FROM",
+1786            into=From,
+1787            dialect=dialect,
+1788            **opts,
+1789        )
+1790
+1791    def group_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1792        """
+1793        Set the GROUP BY expression.
+1794
+1795        Example:
+1796            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
+1797            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
+1798
+1799        Args:
+1800            *expressions (str | Expression): the SQL code strings to parse.
+1801                If a `Group` instance is passed, this is used as-is.
+1802                If another `Expression` instance is passed, it will be wrapped in a `Group`.
+1803                If nothing is passed in then a group by is not applied to the expression
+1804            append (bool): if `True`, add to any existing expressions.
+1805                Otherwise, this flattens all the `Group` expression into a single expression.
+1806            dialect (str): the dialect used to parse the input expression.
+1807            copy (bool): if `False`, modify this expression instance in-place.
+1808            opts (kwargs): other options to use to parse the input expressions.
+1809
+1810        Returns:
+1811            Select: the modified expression.
+1812        """
+1813        if not expressions:
+1814            return self if not copy else self.copy()
+1815        return _apply_child_list_builder(
+1816            *expressions,
+1817            instance=self,
+1818            arg="group",
+1819            append=append,
+1820            copy=copy,
+1821            prefix="GROUP BY",
+1822            into=Group,
+1823            dialect=dialect,
+1824            **opts,
+1825        )
+1826
+1827    def order_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1828        """
+1829        Set the ORDER BY expression.
+1830
+1831        Example:
+1832            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
+1833            'SELECT x FROM tbl ORDER BY x DESC'
+1834
+1835        Args:
+1836            *expressions (str | Expression): the SQL code strings to parse.
+1837                If a `Group` instance is passed, this is used as-is.
+1838                If another `Expression` instance is passed, it will be wrapped in a `Order`.
+1839            append (bool): if `True`, add to any existing expressions.
+1840                Otherwise, this flattens all the `Order` expression into a single expression.
+1841            dialect (str): the dialect used to parse the input expression.
+1842            copy (bool): if `False`, modify this expression instance in-place.
+1843            opts (kwargs): other options to use to parse the input expressions.
+1844
+1845        Returns:
+1846            Select: the modified expression.
+1847        """
+1848        return _apply_child_list_builder(
+1849            *expressions,
+1850            instance=self,
+1851            arg="order",
+1852            append=append,
+1853            copy=copy,
+1854            prefix="ORDER BY",
+1855            into=Order,
+1856            dialect=dialect,
+1857            **opts,
+1858        )
+1859
+1860    def sort_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1861        """
+1862        Set the SORT BY expression.
+1863
+1864        Example:
+1865            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql()
+1866            'SELECT x FROM tbl SORT BY x DESC'
+1867
+1868        Args:
+1869            *expressions (str | Expression): the SQL code strings to parse.
+1870                If a `Group` instance is passed, this is used as-is.
+1871                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
+1872            append (bool): if `True`, add to any existing expressions.
+1873                Otherwise, this flattens all the `Order` expression into a single expression.
+1874            dialect (str): the dialect used to parse the input expression.
+1875            copy (bool): if `False`, modify this expression instance in-place.
+1876            opts (kwargs): other options to use to parse the input expressions.
+1877
+1878        Returns:
+1879            Select: the modified expression.
+1880        """
+1881        return _apply_child_list_builder(
+1882            *expressions,
+1883            instance=self,
+1884            arg="sort",
+1885            append=append,
+1886            copy=copy,
+1887            prefix="SORT BY",
+1888            into=Sort,
+1889            dialect=dialect,
+1890            **opts,
+1891        )
+1892
+1893    def cluster_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1894        """
+1895        Set the CLUSTER BY expression.
+1896
+1897        Example:
+1898            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql()
+1899            'SELECT x FROM tbl CLUSTER BY x DESC'
+1900
+1901        Args:
+1902            *expressions (str | Expression): the SQL code strings to parse.
+1903                If a `Group` instance is passed, this is used as-is.
+1904                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
+1905            append (bool): if `True`, add to any existing expressions.
+1906                Otherwise, this flattens all the `Order` expression into a single expression.
+1907            dialect (str): the dialect used to parse the input expression.
+1908            copy (bool): if `False`, modify this expression instance in-place.
+1909            opts (kwargs): other options to use to parse the input expressions.
+1910
+1911        Returns:
+1912            Select: the modified expression.
+1913        """
+1914        return _apply_child_list_builder(
+1915            *expressions,
+1916            instance=self,
+1917            arg="cluster",
+1918            append=append,
+1919            copy=copy,
+1920            prefix="CLUSTER BY",
+1921            into=Cluster,
+1922            dialect=dialect,
+1923            **opts,
+1924        )
+1925
+1926    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1927        """
+1928        Set the LIMIT expression.
+1929
+1930        Example:
+1931            >>> Select().from_("tbl").select("x").limit(10).sql()
+1932            'SELECT x FROM tbl LIMIT 10'
+1933
+1934        Args:
+1935            expression (str | int | Expression): the SQL code string to parse.
+1936                This can also be an integer.
+1937                If a `Limit` instance is passed, this is used as-is.
+1938                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
+1939            dialect (str): the dialect used to parse the input expression.
+1940            copy (bool): if `False`, modify this expression instance in-place.
+1941            opts (kwargs): other options to use to parse the input expressions.
+1942
+1943        Returns:
+1944            Select: the modified expression.
+1945        """
+1946        return _apply_builder(
+1947            expression=expression,
+1948            instance=self,
+1949            arg="limit",
+1950            into=Limit,
+1951            prefix="LIMIT",
+1952            dialect=dialect,
+1953            copy=copy,
+1954            **opts,
+1955        )
+1956
+1957    def offset(self, expression, dialect=None, copy=True, **opts) -> Select:
+1958        """
+1959        Set the OFFSET expression.
+1960
+1961        Example:
+1962            >>> Select().from_("tbl").select("x").offset(10).sql()
+1963            'SELECT x FROM tbl OFFSET 10'
+1964
+1965        Args:
+1966            expression (str | int | Expression): the SQL code string to parse.
+1967                This can also be an integer.
+1968                If a `Offset` instance is passed, this is used as-is.
+1969                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
+1970            dialect (str): the dialect used to parse the input expression.
+1971            copy (bool): if `False`, modify this expression instance in-place.
+1972            opts (kwargs): other options to use to parse the input expressions.
+1973
+1974        Returns:
+1975            Select: the modified expression.
+1976        """
+1977        return _apply_builder(
+1978            expression=expression,
+1979            instance=self,
+1980            arg="offset",
+1981            into=Offset,
+1982            prefix="OFFSET",
+1983            dialect=dialect,
+1984            copy=copy,
+1985            **opts,
+1986        )
+1987
+1988    def select(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1989        """
+1990        Append to or set the SELECT expressions.
+1991
+1992        Example:
+1993            >>> Select().select("x", "y").sql()
+1994            'SELECT x, y'
+1995
+1996        Args:
+1997            *expressions (str | Expression): the SQL code strings to parse.
+1998                If an `Expression` instance is passed, it will be used as-is.
+1999            append (bool): if `True`, add to any existing expressions.
+2000                Otherwise, this resets the expressions.
+2001            dialect (str): the dialect used to parse the input expressions.
+2002            copy (bool): if `False`, modify this expression instance in-place.
+2003            opts (kwargs): other options to use to parse the input expressions.
+2004
+2005        Returns:
+2006            Select: the modified expression.
+2007        """
+2008        return _apply_list_builder(
+2009            *expressions,
+2010            instance=self,
+2011            arg="expressions",
+2012            append=append,
+2013            dialect=dialect,
+2014            copy=copy,
+2015            **opts,
+2016        )
+2017
+2018    def lateral(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2019        """
+2020        Append to or set the LATERAL expressions.
+2021
+2022        Example:
+2023            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
+2024            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
+2025
+2026        Args:
+2027            *expressions (str | Expression): the SQL code strings to parse.
+2028                If an `Expression` instance is passed, it will be used as-is.
+2029            append (bool): if `True`, add to any existing expressions.
+2030                Otherwise, this resets the expressions.
+2031            dialect (str): the dialect used to parse the input expressions.
+2032            copy (bool): if `False`, modify this expression instance in-place.
+2033            opts (kwargs): other options to use to parse the input expressions.
+2034
+2035        Returns:
+2036            Select: the modified expression.
+2037        """
+2038        return _apply_list_builder(
+2039            *expressions,
+2040            instance=self,
+2041            arg="laterals",
+2042            append=append,
+2043            into=Lateral,
+2044            prefix="LATERAL VIEW",
+2045            dialect=dialect,
+2046            copy=copy,
+2047            **opts,
+2048        )
+2049
    def join(
        self,
        expression,
        on=None,
        using=None,
        append=True,
        join_type=None,
        join_alias=None,
        dialect=None,
        copy=True,
        **opts,
    ) -> Select:
        """
        Append to or set the JOIN expressions.

        Example:
            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'

            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
            'SELECT 1 FROM a JOIN b USING (x, y, z)'

            Use `join_type` to change the type of join:

            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'

        Args:
            expression (str | Expression): the SQL code string to parse.
                If an `Expression` instance is passed, it will be used as-is.
            on (str | Expression): optionally specify the join "on" criteria as a SQL string.
                If an `Expression` instance is passed, it will be used as-is.
            using (str | Expression): optionally specify the join "using" criteria as a SQL string.
                If an `Expression` instance is passed, it will be used as-is.
            append (bool): if `True`, add to any existing expressions.
                Otherwise, this resets the expressions.
            join_type (str): If set, alter the parsed join type
            join_alias: optional alias applied to the joined source (as a table alias).
            dialect (str): the dialect used to parse the input expressions.
            copy (bool): if `False`, modify this expression instance in-place.
            opts (kwargs): other options to use to parse the input expressions.

        Returns:
            Select: the modified expression.
        """
        parse_args = {"dialect": dialect, **opts}

        # First try to parse the argument as a full JOIN clause; if that fails,
        # fall back to parsing it as a bare expression (e.g. just a table name).
        try:
            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
        except ParseError:
            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)

        join = expression if isinstance(expression, Join) else Join(this=expression)

        # A SELECT joined directly must be wrapped in a subquery to remain valid SQL.
        if isinstance(join.this, Select):
            join.this.replace(join.this.subquery())

        if join_type:
            natural: t.Optional[Token]
            side: t.Optional[Token]
            kind: t.Optional[Token]

            # Parsing "JOIN_TYPE" yields up to three tokens — NATURAL, a side
            # (e.g. LEFT/RIGHT) and a kind (e.g. OUTER/INNER); any may be None.
            natural, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore

            if natural:
                join.set("natural", True)
            if side:
                join.set("side", side.text)
            if kind:
                join.set("kind", kind.text)

        if on:
            # Multiple ON criteria are AND-ed into a single condition.
            on = and_(*ensure_collection(on), dialect=dialect, **opts)
            join.set("on", on)

        if using:
            join = _apply_list_builder(
                *ensure_collection(using),
                instance=join,
                arg="using",
                append=append,
                copy=copy,
                **opts,
            )

        if join_alias:
            join.set("this", alias_(join.this, join_alias, table=True))
        # Finally attach the JOIN to this SELECT's "joins" list.
        return _apply_list_builder(
            join,
            instance=self,
            arg="joins",
            append=append,
            copy=copy,
            **opts,
        )
+2144
+2145    def where(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2146        """
+2147        Append to or set the WHERE expressions.
+2148
+2149        Example:
+2150            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
+2151            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
+2152
+2153        Args:
+2154            *expressions (str | Expression): the SQL code strings to parse.
+2155                If an `Expression` instance is passed, it will be used as-is.
+2156                Multiple expressions are combined with an AND operator.
+2157            append (bool): if `True`, AND the new expressions to any existing expression.
+2158                Otherwise, this resets the expression.
+2159            dialect (str): the dialect used to parse the input expressions.
+2160            copy (bool): if `False`, modify this expression instance in-place.
+2161            opts (kwargs): other options to use to parse the input expressions.
+2162
+2163        Returns:
+2164            Select: the modified expression.
+2165        """
+2166        return _apply_conjunction_builder(
+2167            *expressions,
+2168            instance=self,
+2169            arg="where",
+2170            append=append,
+2171            into=Where,
+2172            dialect=dialect,
+2173            copy=copy,
+2174            **opts,
+2175        )
+2176
+2177    def having(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2178        """
+2179        Append to or set the HAVING expressions.
+2180
+2181        Example:
+2182            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
+2183            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
+2184
+2185        Args:
+2186            *expressions (str | Expression): the SQL code strings to parse.
+2187                If an `Expression` instance is passed, it will be used as-is.
+2188                Multiple expressions are combined with an AND operator.
+2189            append (bool): if `True`, AND the new expressions to any existing expression.
+2190                Otherwise, this resets the expression.
+2191            dialect (str): the dialect used to parse the input expressions.
+2192            copy (bool): if `False`, modify this expression instance in-place.
+2193            opts (kwargs): other options to use to parse the input expressions.
+2194
+2195        Returns:
+2196            Select: the modified expression.
+2197        """
+2198        return _apply_conjunction_builder(
+2199            *expressions,
+2200            instance=self,
+2201            arg="having",
+2202            append=append,
+2203            into=Having,
+2204            dialect=dialect,
+2205            copy=copy,
+2206            **opts,
+2207        )
+2208
+2209    def window(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2210        return _apply_list_builder(
+2211            *expressions,
+2212            instance=self,
+2213            arg="windows",
+2214            append=append,
+2215            into=Window,
+2216            dialect=dialect,
+2217            copy=copy,
+2218            **opts,
+2219        )
+2220
+2221    def distinct(self, distinct=True, copy=True) -> Select:
+2222        """
+2223        Set the OFFSET expression.
+2224
+2225        Example:
+2226            >>> Select().from_("tbl").select("x").distinct().sql()
+2227            'SELECT DISTINCT x FROM tbl'
+2228
+2229        Args:
+2230            distinct (bool): whether the Select should be distinct
+2231            copy (bool): if `False`, modify this expression instance in-place.
+2232
+2233        Returns:
+2234            Select: the modified expression.
+2235        """
+2236        instance = _maybe_copy(self, copy)
+2237        instance.set("distinct", Distinct() if distinct else None)
+2238        return instance
+2239
+2240    def ctas(self, table, properties=None, dialect=None, copy=True, **opts) -> Create:
+2241        """
+2242        Convert this expression to a CREATE TABLE AS statement.
+2243
+2244        Example:
+2245            >>> Select().select("*").from_("tbl").ctas("x").sql()
+2246            'CREATE TABLE x AS SELECT * FROM tbl'
+2247
+2248        Args:
+2249            table (str | Expression): the SQL code string to parse as the table name.
+2250                If another `Expression` instance is passed, it will be used as-is.
+2251            properties (dict): an optional mapping of table properties
+2252            dialect (str): the dialect used to parse the input table.
+2253            copy (bool): if `False`, modify this expression instance in-place.
+2254            opts (kwargs): other options to use to parse the input table.
+2255
+2256        Returns:
+2257            Create: the CREATE TABLE AS expression
+2258        """
+2259        instance = _maybe_copy(self, copy)
+2260        table_expression = maybe_parse(
+2261            table,
+2262            into=Table,
+2263            dialect=dialect,
+2264            **opts,
+2265        )
+2266        properties_expression = None
+2267        if properties:
+2268            properties_expression = Properties.from_dict(properties)
+2269
+2270        return Create(
+2271            this=table_expression,
+2272            kind="table",
+2273            expression=instance,
+2274            properties=properties_expression,
+2275        )
+2276
+2277    def lock(self, update: bool = True, copy: bool = True) -> Select:
+2278        """
+2279        Set the locking read mode for this expression.
+2280
+2281        Examples:
+2282            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
+2283            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
+2284
+2285            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
+2286            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
+2287
+2288        Args:
+2289            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
+2290            copy: if `False`, modify this expression instance in-place.
+2291
+2292        Returns:
+2293            The modified expression.
+2294        """
+2295
+2296        inst = _maybe_copy(self, copy)
+2297        inst.set("lock", Lock(update=update))
+2298
+2299        return inst
+2300
+2301    @property
+2302    def named_selects(self) -> t.List[str]:
+2303        return [e.output_name for e in self.expressions if e.alias_or_name]
+2304
    @property
    def selects(self) -> t.List[Expression]:
        # Alias for this SELECT's projection list (its `expressions` arg).
        return self.expressions
+2308
+2309
class Subquery(DerivedTable, Unionable):
    # A parenthesized/derived query; accepts the same modifiers as a query.
    arg_types = {
        "this": True,
        "alias": False,
        "with": False,
        **QUERY_MODIFIERS,
    }

    def unnest(self):
        """Return the innermost expression, peeling off nested subqueries."""
        node = self
        while isinstance(node, Subquery):
            node = node.this
        return node

    @property
    def output_name(self):
        """The name this subquery contributes to its parent scope: its alias."""
        return self.alias
+2330
+2331
class TableSample(Expression):
    # TABLESAMPLE clause; covers bucket-, percent-, row- and size-based sampling
    # plus an optional seed, so the various dialect spellings can share one node.
    arg_types = {
        "this": False,
        "method": False,
        "bucket_numerator": False,
        "bucket_denominator": False,
        "bucket_field": False,
        "percent": False,
        "rows": False,
        "size": False,
        "seed": False,
    }


class Tag(Expression):
    """Tags are used for generating arbitrary sql like SELECT <span>x</span>."""

    arg_types = {
        "this": False,
        "prefix": False,
        "postfix": False,
    }


class Pivot(Expression):
    # PIVOT/UNPIVOT clause; "unpivot" flags which of the two variants this is.
    arg_types = {
        "this": False,
        "expressions": True,
        "field": True,
        "unpivot": True,
    }


class Window(Expression):
    # A window (OVER) specification: partitioning, ordering, frame spec, alias.
    arg_types = {
        "this": True,
        "partition_by": False,
        "order": False,
        "spec": False,
        "alias": False,
    }


class WindowSpec(Expression):
    # Window frame boundaries, e.g. ROWS BETWEEN <start> AND <end>.
    arg_types = {
        "kind": False,
        "start": False,
        "start_side": False,
        "end": False,
        "end_side": False,
    }


class Where(Expression):
    # WHERE clause wrapper; built by Select.where via _apply_conjunction_builder.
    pass


class Star(Expression):
    # The "*" projection, optionally carrying EXCEPT/REPLACE column lists.
    arg_types = {"except": False, "replace": False}

    @property
    def name(self) -> str:
        return "*"

    @property
    def output_name(self):
        return self.name


class Parameter(Expression):
    pass


class SessionParameter(Expression):
    arg_types = {"this": True, "kind": False}


class Placeholder(Expression):
    arg_types = {"this": False}


class Null(Condition):
    arg_types: t.Dict[str, t.Any] = {}

    @property
    def name(self) -> str:
        return "NULL"


class Boolean(Condition):
    pass
+2423
+2424
class DataType(Expression):
    """A SQL data type expression, e.g. VARCHAR(100).

    The concrete type is a `DataType.Type` member stored in "this";
    "expressions" holds any type parameters. NOTE(review): the exact
    semantics of "nested" and "values" aren't visible here — confirm
    against the parser before documenting them as fact.
    """

    arg_types = {
        "this": True,
        "expressions": False,
        "nested": False,
        "values": False,
    }

    class Type(AutoName):
        # All supported SQL types; AutoName makes each value its member name.
        CHAR = auto()
        NCHAR = auto()
        VARCHAR = auto()
        NVARCHAR = auto()
        TEXT = auto()
        MEDIUMTEXT = auto()
        LONGTEXT = auto()
        MEDIUMBLOB = auto()
        LONGBLOB = auto()
        BINARY = auto()
        VARBINARY = auto()
        INT = auto()
        TINYINT = auto()
        SMALLINT = auto()
        BIGINT = auto()
        FLOAT = auto()
        DOUBLE = auto()
        DECIMAL = auto()
        BOOLEAN = auto()
        JSON = auto()
        JSONB = auto()
        INTERVAL = auto()
        TIME = auto()
        TIMESTAMP = auto()
        TIMESTAMPTZ = auto()
        TIMESTAMPLTZ = auto()
        DATE = auto()
        DATETIME = auto()
        ARRAY = auto()
        MAP = auto()
        UUID = auto()
        GEOGRAPHY = auto()
        GEOMETRY = auto()
        STRUCT = auto()
        NULLABLE = auto()
        HLLSKETCH = auto()
        HSTORE = auto()
        SUPER = auto()
        SERIAL = auto()
        SMALLSERIAL = auto()
        BIGSERIAL = auto()
        XML = auto()
        UNIQUEIDENTIFIER = auto()
        MONEY = auto()
        SMALLMONEY = auto()
        ROWVERSION = auto()
        IMAGE = auto()
        VARIANT = auto()
        OBJECT = auto()
        NULL = auto()
        UNKNOWN = auto()  # Sentinel value, useful for type annotation

    # Convenience groupings used for type checks elsewhere in the codebase.
    TEXT_TYPES = {
        Type.CHAR,
        Type.NCHAR,
        Type.VARCHAR,
        Type.NVARCHAR,
        Type.TEXT,
    }

    INTEGER_TYPES = {
        Type.INT,
        Type.TINYINT,
        Type.SMALLINT,
        Type.BIGINT,
    }

    FLOAT_TYPES = {
        Type.FLOAT,
        Type.DOUBLE,
    }

    NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES}

    TEMPORAL_TYPES = {
        Type.TIMESTAMP,
        Type.TIMESTAMPTZ,
        Type.TIMESTAMPLTZ,
        Type.DATE,
        Type.DATETIME,
    }

    @classmethod
    def build(
        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
    ) -> DataType:
        """Construct a DataType from a string, a Type member, or a DataType.

        Strings are first matched (case-insensitively) against `Type` members and
        otherwise parsed as SQL in the given dialect. Extra `kwargs` override the
        resulting args. An existing `DataType` is returned unchanged.

        Raises:
            ValueError: if the string is unparsable or `dtype` has the wrong type.
        """
        from sqlglot import parse_one

        if isinstance(dtype, str):
            if dtype.upper() in cls.Type.__members__:
                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()])
            else:
                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
            if data_type_exp is None:
                raise ValueError(f"Unparsable data type value: {dtype}")
        elif isinstance(dtype, DataType.Type):
            data_type_exp = DataType(this=dtype)
        elif isinstance(dtype, DataType):
            return dtype
        else:
            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
        return DataType(**{**data_type_exp.args, **kwargs})

    def is_type(self, dtype: DataType.Type) -> bool:
        """Whether this type's enum member equals `dtype`."""
        return self.this == dtype
+2539
+2540
# https://www.postgresql.org/docs/15/datatype-pseudo.html
class PseudoType(Expression):
    pass


class StructKwarg(Expression):
    # A named member inside a STRUCT type, pairing a name with its type.
    arg_types = {"this": True, "expression": True}


# WHERE x <OP> EXISTS|ALL|ANY|SOME(SELECT ...)
class SubqueryPredicate(Predicate):
    pass


class All(SubqueryPredicate):
    pass


class Any(SubqueryPredicate):
    pass


class Exists(SubqueryPredicate):
    pass


# Commands to interact with the databases or engines. For most of the command
# expressions we parse whatever comes after the command's name as a string.
class Command(Expression):
    arg_types = {"this": True, "expression": False}


class Transaction(Expression):
    arg_types = {"this": False, "modes": False}


class Commit(Expression):
    arg_types = {"chain": False}


class Rollback(Expression):
    arg_types = {"savepoint": False}


class AlterTable(Expression):
    arg_types = {"this": True, "actions": True, "exists": False}


class AddConstraint(Expression):
    arg_types = {"this": False, "expression": False, "enforced": False}


class DropPartition(Expression):
    arg_types = {"expressions": True, "exists": False}


# Binary expressions like (ADD a b)
class Binary(Expression):
    arg_types = {"this": True, "expression": True}

    @property
    def left(self):
        # Left operand lives in "this".
        return self.this

    @property
    def right(self):
        # Right operand lives in "expression".
        return self.expression
+2608
+2609
class Add(Binary):
    pass


# A connector combines two conditions (AND / OR).
class Connector(Binary, Condition):
    pass


class And(Connector):
    pass


class Or(Connector):
    pass


class BitwiseAnd(Binary):
    pass


class BitwiseLeftShift(Binary):
    pass


class BitwiseOr(Binary):
    pass


class BitwiseRightShift(Binary):
    pass


class BitwiseXor(Binary):
    pass


class Div(Binary):
    pass


class Dot(Binary):
    @property
    def name(self) -> str:
        # A dotted reference (a.b) is named after its right-hand side.
        return self.expression.name


# String concatenation operator "||".
class DPipe(Binary):
    pass


class EQ(Binary, Predicate):
    pass


class NullSafeEQ(Binary, Predicate):
    pass


class NullSafeNEQ(Binary, Predicate):
    pass


class Distance(Binary):
    pass


class Escape(Binary):
    pass


class Glob(Binary, Predicate):
    pass


class GT(Binary, Predicate):
    pass


class GTE(Binary, Predicate):
    pass


class ILike(Binary, Predicate):
    pass


class IntDiv(Binary):
    pass


class Is(Binary, Predicate):
    pass


class Kwarg(Binary):
    """Kwarg in special functions like func(kwarg => y)."""


class Like(Binary, Predicate):
    pass


class LT(Binary, Predicate):
    pass


class LTE(Binary, Predicate):
    pass


class Mod(Binary):
    pass


class Mul(Binary):
    pass


class NEQ(Binary, Predicate):
    pass


class SimilarTo(Binary, Predicate):
    pass


class Slice(Binary):
    # Both bounds are optional, e.g. x[:], x[1:], x[:2].
    arg_types = {"this": False, "expression": False}


class Sub(Binary):
    pass


# Unary Expressions
# (NOT a)
class Unary(Expression):
    pass


class BitwiseNot(Unary):
    pass


class Not(Unary, Condition):
    pass


class Paren(Unary, Condition):
    arg_types = {"this": True, "with": False}


class Neg(Unary):
    pass


# Special Functions
class Alias(Expression):
    arg_types = {"this": True, "alias": False}

    @property
    def output_name(self):
        # An aliased expression is known to the outside by its alias.
        return self.alias


class Aliases(Expression):
    arg_types = {"this": True, "expressions": True}

    @property
    def aliases(self):
        return self.expressions


class AtTimeZone(Expression):
    arg_types = {"this": True, "zone": True}


class Between(Predicate):
    arg_types = {"this": True, "low": True, "high": True}


class Bracket(Condition):
    # Subscript access, e.g. x[1] or map['key'].
    arg_types = {"this": True, "expressions": True}


class Distinct(Expression):
    arg_types = {"expressions": False, "on": False}


class In(Predicate):
    # IN over a literal list ("expressions"), a subquery ("query"), an UNNEST,
    # or a field; "is_global" covers ClickHouse-style GLOBAL IN.
    arg_types = {
        "this": True,
        "expressions": False,
        "query": False,
        "unnest": False,
        "field": False,
        "is_global": False,
    }


class TimeUnit(Expression):
    """Automatically converts unit arg into a var."""

    arg_types = {"unit": False}

    def __init__(self, **args):
        # Normalize the unit at construction: a bare column reference (e.g. `day`)
        # becomes a Var, and a Week function's inner column is normalized in place.
        unit = args.get("unit")
        if isinstance(unit, Column):
            args["unit"] = Var(this=unit.name)
        elif isinstance(unit, Week):
            unit.set("this", Var(this=unit.this.name))
        super().__init__(**args)


class Interval(TimeUnit):
    arg_types = {"this": False, "unit": False}


class IgnoreNulls(Expression):
    pass


class RespectNulls(Expression):
    pass
+2834
+2835
+2836# Functions
class Func(Condition):
    """
    The base class for all function expressions.

    Attributes:
        is_var_len_args (bool): if set to True the last argument defined in arg_types will be
            treated as a variable length argument and the argument's value will be stored as a list.
        _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)
            for this function expression. These values are used to map this node to a name during parsing
            as well as to provide the function's name during SQL string generation. By default the SQL
            name is set to the expression's class name transformed to snake case.
    """

    is_var_len_args = False

    @classmethod
    def from_arg_list(cls, args):
        """Build an instance of `cls` from a positional argument list.

        Positional args are matched to `arg_types` keys in declaration order.
        When `is_var_len_args` is set, the surplus args are collected into the
        last declared key as a list.
        """
        if cls.is_var_len_args:
            all_arg_keys = list(cls.arg_types)
            # The last declared argument absorbs all remaining positional args.
            # (The original re-tested is_var_len_args here, but this branch is
            # only reached when it is already true.)
            non_var_len_arg_keys = all_arg_keys[:-1]

            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
            args_dict[all_arg_keys[-1]] = args[len(non_var_len_arg_keys) :]
        else:
            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}

        return cls(**args_dict)

    @classmethod
    def sql_names(cls):
        """SQL name plus aliases; defaults to the snake-cased class name."""
        if cls is Func:
            raise NotImplementedError(
                "SQL name is only supported by concrete function implementations"
            )
        # Check cls.__dict__ (not getattr) so each subclass caches its own names
        # instead of reusing a parent's.
        if "_sql_names" not in cls.__dict__:
            cls._sql_names = [camel_to_snake_case(cls.__name__)]
        return cls._sql_names

    @classmethod
    def sql_name(cls):
        """The canonical SQL name: the first entry of `sql_names`."""
        return cls.sql_names()[0]

    @classmethod
    def default_parser_mappings(cls):
        """Map each SQL name/alias to the `from_arg_list` constructor for the parser."""
        return {name: cls.from_arg_list for name in cls.sql_names()}
+2884
+2885
# Base class for aggregate functions (COUNT, SUM, ...).
class AggFunc(Func):
    pass


class Abs(Func):
    pass


class Anonymous(Func):
    # A function sqlglot doesn't model explicitly; "this" is its raw name.
    arg_types = {"this": True, "expressions": False}
    is_var_len_args = True


class ApproxDistinct(AggFunc):
    arg_types = {"this": True, "accuracy": False}


class Array(Func):
    arg_types = {"expressions": False}
    is_var_len_args = True


class GenerateSeries(Func):
    arg_types = {"start": True, "end": True, "step": False}


class ArrayAgg(AggFunc):
    pass


class ArrayAll(Func):
    arg_types = {"this": True, "expression": True}


class ArrayAny(Func):
    arg_types = {"this": True, "expression": True}


class ArrayConcat(Func):
    arg_types = {"this": True, "expressions": False}
    is_var_len_args = True


class ArrayContains(Func):
    arg_types = {"this": True, "expression": True}


class ArrayFilter(Func):
    arg_types = {"this": True, "expression": True}
    _sql_names = ["FILTER", "ARRAY_FILTER"]


class ArraySize(Func):
    arg_types = {"this": True, "expression": False}


class ArraySort(Func):
    arg_types = {"this": True, "expression": False}


class ArraySum(Func):
    pass


class ArrayUnionAgg(AggFunc):
    pass


class Avg(AggFunc):
    pass


class AnyValue(AggFunc):
    pass


class Case(Func):
    # CASE [<this>] WHEN ... ("ifs") [ELSE <default>] END
    arg_types = {"this": False, "ifs": True, "default": False}
+2964
+2965
class Cast(Func):
    # CAST(<this> AS <to>)
    arg_types = {"this": True, "to": True}

    @property
    def name(self) -> str:
        # A cast is named after the expression being cast.
        return self.this.name

    @property
    def to(self):
        # The target DataType of the cast.
        return self.args["to"]

    @property
    def output_name(self):
        return self.name

    def is_type(self, dtype: DataType.Type) -> bool:
        """Whether the cast's target type equals `dtype`."""
        return self.to.is_type(dtype)


class Collate(Binary):
    pass


# TRY_CAST — like CAST but, in most dialects, yields NULL on conversion failure.
class TryCast(Cast):
    pass
+2991
+2992
+2993class Ceil(Func):
+2994    arg_types = {"this": True, "decimals": False}
+2995    _sql_names = ["CEIL", "CEILING"]
+2996
+2997
+2998class Coalesce(Func):
+2999    arg_types = {"this": True, "expressions": False}
+3000    is_var_len_args = True
+3001
+3002
+3003class Concat(Func):
+3004    arg_types = {"expressions": True}
+3005    is_var_len_args = True
+3006
+3007
+3008class ConcatWs(Concat):
+3009    _sql_names = ["CONCAT_WS"]
+3010
+3011
+3012class Count(AggFunc):
+3013    arg_types = {"this": False}
+3014
+3015
+3016class CurrentDate(Func):
+3017    arg_types = {"this": False}
+3018
+3019
+3020class CurrentDatetime(Func):
+3021    arg_types = {"this": False}
+3022
+3023
+3024class CurrentTime(Func):
+3025    arg_types = {"this": False}
+3026
+3027
+3028class CurrentTimestamp(Func):
+3029    arg_types = {"this": False}
+3030
+3031
+3032class DateAdd(Func, TimeUnit):
+3033    arg_types = {"this": True, "expression": True, "unit": False}
+3034
+3035
+3036class DateSub(Func, TimeUnit):
+3037    arg_types = {"this": True, "expression": True, "unit": False}
+3038
+3039
+3040class DateDiff(Func, TimeUnit):
+3041    arg_types = {"this": True, "expression": True, "unit": False}
+3042
+3043
+3044class DateTrunc(Func):
+3045    arg_types = {"this": True, "expression": True, "zone": False}
+3046
+3047
+3048class DatetimeAdd(Func, TimeUnit):
+3049    arg_types = {"this": True, "expression": True, "unit": False}
+3050
+3051
+3052class DatetimeSub(Func, TimeUnit):
+3053    arg_types = {"this": True, "expression": True, "unit": False}
+3054
+3055
+3056class DatetimeDiff(Func, TimeUnit):
+3057    arg_types = {"this": True, "expression": True, "unit": False}
+3058
+3059
+3060class DatetimeTrunc(Func, TimeUnit):
+3061    arg_types = {"this": True, "unit": True, "zone": False}
+3062
+3063
+3064class DayOfWeek(Func):
+3065    _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"]
+3066
+3067
+3068class DayOfMonth(Func):
+3069    _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"]
+3070
+3071
+3072class DayOfYear(Func):
+3073    _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"]
+3074
+3075
+3076class WeekOfYear(Func):
+3077    _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"]
+3078
+3079
+3080class LastDateOfMonth(Func):
+3081    pass
+3082
+3083
+3084class Extract(Func):
+3085    arg_types = {"this": True, "expression": True}
+3086
+3087
+3088class TimestampAdd(Func, TimeUnit):
+3089    arg_types = {"this": True, "expression": True, "unit": False}
+3090
+3091
+3092class TimestampSub(Func, TimeUnit):
+3093    arg_types = {"this": True, "expression": True, "unit": False}
+3094
+3095
+3096class TimestampDiff(Func, TimeUnit):
+3097    arg_types = {"this": True, "expression": True, "unit": False}
+3098
+3099
+3100class TimestampTrunc(Func, TimeUnit):
+3101    arg_types = {"this": True, "unit": True, "zone": False}
+3102
+3103
+3104class TimeAdd(Func, TimeUnit):
+3105    arg_types = {"this": True, "expression": True, "unit": False}
+3106
+3107
+3108class TimeSub(Func, TimeUnit):
+3109    arg_types = {"this": True, "expression": True, "unit": False}
+3110
+3111
+3112class TimeDiff(Func, TimeUnit):
+3113    arg_types = {"this": True, "expression": True, "unit": False}
+3114
+3115
+3116class TimeTrunc(Func, TimeUnit):
+3117    arg_types = {"this": True, "unit": True, "zone": False}
+3118
+3119
+3120class DateFromParts(Func):
+3121    _sql_names = ["DATEFROMPARTS"]
+3122    arg_types = {"year": True, "month": True, "day": True}
+3123
+3124
+3125class DateStrToDate(Func):
+3126    pass
+3127
+3128
+3129class DateToDateStr(Func):
+3130    pass
+3131
+3132
+3133class DateToDi(Func):
+3134    pass
+3135
+3136
+3137class Day(Func):
+3138    pass
+3139
+3140
+3141class Decode(Func):
+3142    arg_types = {"this": True, "charset": True, "replace": False}
+3143
+3144
+3145class DiToDate(Func):
+3146    pass
+3147
+3148
+3149class Encode(Func):
+3150    arg_types = {"this": True, "charset": True}
+3151
+3152
+3153class Exp(Func):
+3154    pass
+3155
+3156
+3157class Explode(Func):
+3158    pass
+3159
+3160
+3161class Floor(Func):
+3162    arg_types = {"this": True, "decimals": False}
+3163
+3164
+3165class Greatest(Func):
+3166    arg_types = {"this": True, "expressions": False}
+3167    is_var_len_args = True
+3168
+3169
+3170class GroupConcat(Func):
+3171    arg_types = {"this": True, "separator": False}
+3172
+3173
+3174class Hex(Func):
+3175    pass
+3176
+3177
# Conditional / null-handling functions.
class If(Func):
    arg_types = {"this": True, "true": True, "false": False}


class IfNull(Func):
    # NVL is the Oracle spelling of the same function.
    arg_types = {"this": True, "expression": False}
    _sql_names = ["IFNULL", "NVL"]


class Initcap(Func):
    pass


# JSON extraction operators/functions (-> / ->> style semantics).
class JSONBContains(Binary):
    _sql_names = ["JSONB_CONTAINS"]


class JSONExtract(Binary, Func):
    _sql_names = ["JSON_EXTRACT"]


class JSONExtractScalar(JSONExtract):
    _sql_names = ["JSON_EXTRACT_SCALAR"]


class JSONBExtract(JSONExtract):
    _sql_names = ["JSONB_EXTRACT"]


class JSONBExtractScalar(JSONExtract):
    _sql_names = ["JSONB_EXTRACT_SCALAR"]


class Least(Func):
    arg_types = {"this": True, "expressions": False}
    is_var_len_args = True


class Length(Func):
    pass


class Levenshtein(Func):
    # Optional per-operation costs for weighted edit distance.
    arg_types = {
        "this": True,
        "expression": False,
        "ins_cost": False,
        "del_cost": False,
        "sub_cost": False,
    }


# Logarithm family: Log with one arg is dialect-dependent (natural vs base-10).
class Ln(Func):
    pass


class Log(Func):
    arg_types = {"this": True, "expression": False}


class Log2(Func):
    pass


class Log10(Func):
    pass


class LogicalOr(AggFunc):
    # BOOL_OR is the standard-SQL / Postgres name.
    _sql_names = ["LOGICAL_OR", "BOOL_OR"]


class Lower(Func):
    _sql_names = ["LOWER", "LCASE"]


class Map(Func):
    arg_types = {"keys": False, "values": False}


class VarMap(Func):
    # MAP with a variable number of key/value arguments (e.g. ClickHouse, Hive).
    arg_types = {"keys": True, "values": True}
    is_var_len_args = True


class Matches(Func):
    """Oracle/Snowflake decode.
    https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm
    Pattern matching MATCHES(value, search1, result1, ...searchN, resultN, else)
    """

    arg_types = {"this": True, "expressions": True}
    is_var_len_args = True


class Max(AggFunc):
    arg_types = {"this": True, "expression": False}


class Min(AggFunc):
    arg_types = {"this": True, "expression": False}


class Month(Func):
    pass


class Nvl2(Func):
    arg_types = {"this": True, "true": True, "false": False}


class Posexplode(Func):
    pass


class Pow(Binary, Func):
    _sql_names = ["POWER", "POW"]


class PercentileCont(AggFunc):
    pass


class PercentileDisc(AggFunc):
    pass


class Quantile(AggFunc):
    arg_types = {"this": True, "quantile": True}


# Clickhouse-specific:
# https://clickhouse.com/docs/en/sql-reference/aggregate-functions/reference/quantiles/#quantiles
class Quantiles(AggFunc):
    arg_types = {"parameters": True, "expressions": True}


class QuantileIf(AggFunc):
    arg_types = {"parameters": True, "expressions": True}


class ApproxQuantile(Quantile):
    # "accuracy" (e.g. Spark) and "weight" (e.g. DuckDB) are dialect extensions.
    arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False}
+3321
+3322
class ReadCSV(Func):
    _sql_names = ["READ_CSV"]
    is_var_len_args = True
    arg_types = {"this": True, "expressions": False}


class Reduce(Func):
    # Higher-order aggregation over an array: initial value, merge lambda,
    # optional finishing lambda (Spark/Presto REDUCE).
    arg_types = {"this": True, "initial": True, "merge": True, "finish": False}


# Regular-expression functions.
class RegexpLike(Func):
    arg_types = {"this": True, "expression": True, "flag": False}


class RegexpILike(Func):
    arg_types = {"this": True, "expression": True, "flag": False}


class RegexpSplit(Func):
    arg_types = {"this": True, "expression": True}


class Repeat(Func):
    arg_types = {"this": True, "times": True}


class Round(Func):
    arg_types = {"this": True, "decimals": False}


class RowNumber(Func):
    # ROW_NUMBER() takes no arguments.
    arg_types: t.Dict[str, t.Any] = {}


class SafeDivide(Func):
    arg_types = {"this": True, "expression": True}


class SetAgg(AggFunc):
    pass


class SortArray(Func):
    arg_types = {"this": True, "asc": False}


class Split(Func):
    arg_types = {"this": True, "expression": True, "limit": False}


# Start may be omitted in the case of postgres
# https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6
class Substring(Func):
    arg_types = {"this": True, "start": False, "length": False}


class StrPosition(Func):
    arg_types = {
        "this": True,
        "substr": True,
        "position": False,
        "instance": False,
    }


# String <-> time conversions driven by an explicit format string.
class StrToDate(Func):
    arg_types = {"this": True, "format": True}


class StrToTime(Func):
    arg_types = {"this": True, "format": True}


# Spark allows unix_timestamp()
# https://spark.apache.org/docs/3.1.3/api/python/reference/api/pyspark.sql.functions.unix_timestamp.html
class StrToUnix(Func):
    arg_types = {"this": False, "format": False}


class NumberToStr(Func):
    arg_types = {"this": True, "format": True}


class Struct(Func):
    arg_types = {"expressions": True}
    is_var_len_args = True


class StructExtract(Func):
    arg_types = {"this": True, "expression": True}


class Sum(AggFunc):
    pass


class Sqrt(Func):
    pass


class Stddev(AggFunc):
    pass


class StddevPop(AggFunc):
    pass


class StddevSamp(AggFunc):
    pass


# Time/date conversion helpers; "TimeStr"/"TsOrDs" prefixes name the input
# representation, the suffix names the output representation.
class TimeToStr(Func):
    arg_types = {"this": True, "format": True}


class TimeToTimeStr(Func):
    pass


class TimeToUnix(Func):
    pass


class TimeStrToDate(Func):
    pass


class TimeStrToTime(Func):
    pass


class TimeStrToUnix(Func):
    pass


class Trim(Func):
    # "position" is LEADING/TRAILING/BOTH; "collation" is dialect-specific.
    arg_types = {
        "this": True,
        "expression": False,
        "position": False,
        "collation": False,
    }


class TsOrDsAdd(Func, TimeUnit):
    arg_types = {"this": True, "expression": True, "unit": False}


class TsOrDsToDateStr(Func):
    pass


class TsOrDsToDate(Func):
    arg_types = {"this": True, "format": False}


class TsOrDiToDi(Func):
    pass


class Unhex(Func):
    pass


class UnixToStr(Func):
    arg_types = {"this": True, "format": False}


# https://prestodb.io/docs/current/functions/datetime.html
# presto has weird zone/hours/minutes
class UnixToTime(Func):
    arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False}

    # Canonical scale markers used by dialects when generating SQL.
    SECONDS = Literal.string("seconds")
    MILLIS = Literal.string("millis")
    MICROS = Literal.string("micros")


class UnixToTimeStr(Func):
    pass


class Upper(Func):
    _sql_names = ["UPPER", "UCASE"]


class Variance(AggFunc):
    _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"]


class VariancePop(AggFunc):
    _sql_names = ["VARIANCE_POP", "VAR_POP"]


class Week(Func):
    arg_types = {"this": True, "mode": False}


class Year(Func):
    pass


# Statements rather than scalar functions.
class Use(Expression):
    arg_types = {"this": True, "kind": False}


class Merge(Expression):
    arg_types = {"this": True, "using": True, "on": True, "expressions": True}


class When(Func):
    arg_types = {"this": True, "then": True}
+3536
+3537
+3538def _norm_args(expression):
+3539    args = {}
+3540
+3541    for k, arg in expression.args.items():
+3542        if isinstance(arg, list):
+3543            arg = [_norm_arg(a) for a in arg]
+3544            if not arg:
+3545                arg = None
+3546        else:
+3547            arg = _norm_arg(arg)
+3548
+3549        if arg is not None and arg is not False:
+3550            args[k] = arg
+3551
+3552    return args
+3553
+3554
+3555def _norm_arg(arg):
+3556    return arg.lower() if isinstance(arg, str) else arg
+3557
+3558
+3559ALL_FUNCTIONS = subclasses(__name__, Func, (AggFunc, Anonymous, Func))
+3560
+3561
+3562# Helpers
def maybe_parse(
    sql_or_expression: str | Expression,
    *,
    into: t.Optional[IntoType] = None,
    dialect: DialectType = None,
    prefix: t.Optional[str] = None,
    **opts,
) -> Expression:
    """Gracefully handle a possible string or expression.

    Example:
        >>> maybe_parse("1")
        (LITERAL this: 1, is_string: False)
        >>> maybe_parse(to_identifier("x"))
        (IDENTIFIER this: x, quoted: False)

    Args:
        sql_or_expression: the SQL code string or an expression
        into: the SQLGlot Expression to parse into
        dialect: the dialect used to parse the input expressions (in the case that an
            input expression is a SQL string).
        prefix: a string to prefix the sql with before it gets parsed
            (automatically includes a space)
        **opts: other options to use to parse the input expressions (again, in the case
            that an input expression is a SQL string).

    Returns:
        Expression: the parsed or given expression.
    """
    # Already an AST node: hand it back untouched.
    if isinstance(sql_or_expression, Expression):
        return sql_or_expression

    # Imported lazily to avoid a circular import with the package root.
    import sqlglot

    sql = str(sql_or_expression)
    sql = f"{prefix} {sql}" if prefix else sql
    return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
+3601
+3602
+3603def _maybe_copy(instance, copy=True):
+3604    return instance.copy() if copy else instance
+3605
+3606
def _is_wrong_expression(expression, into):
    """True when *expression* is an Expression node but not of the target *into* type.

    Non-Expression values (e.g. raw SQL strings) are never "wrong" — they get
    parsed later instead of wrapped.
    """
    if not isinstance(expression, Expression):
        return False
    return not isinstance(expression, into)
+3609
+3610
def _apply_builder(
    expression,
    instance,
    arg,
    copy=True,
    prefix=None,
    into=None,
    dialect=None,
    **opts,
):
    """Parse *expression* and install it as the single `arg` child of *instance*.

    A mismatched Expression is first wrapped in the target `into` type; the
    instance is copied unless `copy=False`.
    """
    if _is_wrong_expression(expression, into):
        expression = into(this=expression)

    target = _maybe_copy(instance, copy)
    parsed = maybe_parse(
        sql_or_expression=expression,
        prefix=prefix,
        into=into,
        dialect=dialect,
        **opts,
    )
    target.set(arg, parsed)
    return target
+3633
+3634
def _apply_child_list_builder(
    *expressions,
    instance,
    arg,
    append=True,
    copy=True,
    prefix=None,
    into=None,
    dialect=None,
    properties=None,
    **opts,
):
    # Builds (or extends) a single container child of type `into` whose
    # "expressions" list aggregates the parsed inputs, then sets it on
    # `instance.args[arg]` — e.g. merging several CTEs into one WITH node.
    instance = _maybe_copy(instance, copy)
    parsed = []
    for expression in expressions:
        if _is_wrong_expression(expression, into):
            # Wrap foreign expressions so they parse into the container type.
            expression = into(expressions=[expression])
        expression = maybe_parse(
            expression,
            into=into,
            dialect=dialect,
            prefix=prefix,
            **opts,
        )
        # Flatten the container: only its children are collected.
        parsed.extend(expression.expressions)

    existing = instance.args.get(arg)
    if append and existing:
        # Keep previously-set children ahead of the newly parsed ones.
        parsed = existing.expressions + parsed

    child = into(expressions=parsed)
    # Extra properties (e.g. RECURSIVE on a WITH) are applied to the container.
    for k, v in (properties or {}).items():
        child.set(k, v)
    instance.set(arg, child)
    return instance
+3670
+3671
def _apply_list_builder(
    *expressions,
    instance,
    arg,
    append=True,
    copy=True,
    prefix=None,
    into=None,
    dialect=None,
    **opts,
):
    """Parse each input and store the resulting list under `instance.args[arg]`.

    With `append=True` (the default) the parsed expressions are appended to any
    list already stored under `arg`; otherwise they replace it.
    """
    target = _maybe_copy(instance, copy)

    parsed = []
    for expression in expressions:
        parsed.append(
            maybe_parse(
                sql_or_expression=expression,
                into=into,
                prefix=prefix,
                dialect=dialect,
                **opts,
            )
        )

    current = target.args.get(arg)
    if append and current:
        parsed = current + parsed

    target.set(arg, parsed)
    return target
+3702
+3703
def _apply_conjunction_builder(
    *expressions,
    instance,
    arg,
    into=None,
    append=True,
    copy=True,
    dialect=None,
    **opts,
):
    # AND-combines the given conditions into `instance.args[arg]`, optionally
    # wrapping the result in an `into` container node (e.g. Where, Having).
    expressions = [exp for exp in expressions if exp is not None and exp != ""]
    if not expressions:
        # Nothing to add: return the instance untouched (no copy either).
        return instance

    inst = _maybe_copy(instance, copy)

    existing = inst.args.get(arg)
    if append and existing is not None:
        # Unwrap the existing container so its condition joins the new AND chain.
        expressions = [existing.this if into else existing] + list(expressions)

    node = and_(*expressions, dialect=dialect, **opts)

    inst.set(arg, into(this=node) if into else node)
    return inst
+3728
+3729
def _combine(expressions, operator, dialect=None, **opts):
    """Left-fold *expressions* into a chain of binary *operator* nodes.

    Each input is normalized via `condition`; logical sub-expressions are
    parenthesized so precedence survives SQL regeneration.
    """
    conditions = [condition(expression, dialect=dialect, **opts) for expression in expressions]

    first, *rest = conditions
    if rest:
        first = _wrap_operator(first)

    combined = first
    for expr in rest:
        combined = operator(this=combined, expression=_wrap_operator(expr))
    return combined
+3738
+3739
def _wrap_operator(expression):
    """Parenthesize AND/OR/NOT nodes so their precedence is preserved when nested."""
    if isinstance(expression, (And, Or, Not)):
        return Paren(this=expression)
    return expression
+3744
+3745
def union(left, right, distinct=True, dialect=None, **opts):
    """
    Initializes a syntax tree from one UNION expression.

    Example:
        >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()
        'SELECT * FROM foo UNION SELECT * FROM bla'

    Args:
        left (str | Expression): SQL string or expression for the left-hand side;
            an Expression instance is used as-is.
        right (str | Expression): SQL string or expression for the right-hand side;
            an Expression instance is used as-is.
        distinct (bool): set the DISTINCT flag if and only if this is true.
        dialect (str): the dialect used to parse the input expression.
        opts (kwargs): other options to use to parse the input expressions.
    Returns:
        Union: the syntax tree for the UNION expression.
    """
    left_expr = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    right_expr = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    return Union(this=left_expr, expression=right_expr, distinct=distinct)
+3769
+3770
def intersect(left, right, distinct=True, dialect=None, **opts):
    """
    Initializes a syntax tree from one INTERSECT expression.

    Example:
        >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
        'SELECT * FROM foo INTERSECT SELECT * FROM bla'

    Args:
        left (str | Expression): SQL string or expression for the left-hand side;
            an Expression instance is used as-is.
        right (str | Expression): SQL string or expression for the right-hand side;
            an Expression instance is used as-is.
        distinct (bool): set the DISTINCT flag if and only if this is true.
        dialect (str): the dialect used to parse the input expression.
        opts (kwargs): other options to use to parse the input expressions.
    Returns:
        Intersect: the syntax tree for the INTERSECT expression.
    """
    left_expr = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    right_expr = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    return Intersect(this=left_expr, expression=right_expr, distinct=distinct)
+3794
+3795
def except_(left, right, distinct=True, dialect=None, **opts):
    """
    Initializes a syntax tree from one EXCEPT expression.

    Example:
        >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
        'SELECT * FROM foo EXCEPT SELECT * FROM bla'

    Args:
        left (str | Expression): SQL string or expression for the left-hand side;
            an Expression instance is used as-is.
        right (str | Expression): SQL string or expression for the right-hand side;
            an Expression instance is used as-is.
        distinct (bool): set the DISTINCT flag if and only if this is true.
        dialect (str): the dialect used to parse the input expression.
        opts (kwargs): other options to use to parse the input expressions.
    Returns:
        Except: the syntax tree for the EXCEPT statement.
    """
    left_expr = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
    right_expr = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
    return Except(this=left_expr, expression=right_expr, distinct=distinct)
+3819
+3820
def select(*expressions, dialect=None, **opts) -> Select:
    """
    Initializes a syntax tree from one or multiple SELECT expressions.

    Example:
        >>> select("col1", "col2").from_("tbl").sql()
        'SELECT col1, col2 FROM tbl'

    Args:
        *expressions (str | Expression): SQL strings (or Expression instances,
            used as-is) forming the projection list of a SELECT statement.
        dialect (str): the dialect used to parse the input expressions (when an
            input expression is a SQL string).
        **opts: other options to use to parse the input expressions (again, when
            an input expression is a SQL string).

    Returns:
        Select: the syntax tree for the SELECT statement.
    """
    query = Select()
    return query.select(*expressions, dialect=dialect, **opts)
+3841
+3842
def from_(*expressions, dialect=None, **opts) -> Select:
    """
    Initializes a syntax tree from a FROM expression.

    Example:
        >>> from_("tbl").select("col1", "col2").sql()
        'SELECT col1, col2 FROM tbl'

    Args:
        *expressions (str | Expression): SQL strings (or Expression instances,
            used as-is) forming the FROM clause of a SELECT statement.
        dialect (str): the dialect used to parse the input expression (when the
            input expression is a SQL string).
        **opts: other options to use to parse the input expressions (again, when
            the input expression is a SQL string).

    Returns:
        Select: the syntax tree for the SELECT statement.
    """
    query = Select()
    return query.from_(*expressions, dialect=dialect, **opts)
+3863
+3864
def update(table, properties, where=None, from_=None, dialect=None, **opts) -> Update:
    """
    Creates an update statement.

    Example:
        >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()
        "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"

    Args:
        table (str | Expression): the table to update.
        properties (Dict[str, Any]): dictionary of properties to set, which are
            auto converted to sql objects (e.g. None -> NULL).
        where (str | Condition): sql conditional parsed into a WHERE statement.
        from_ (str): sql statement parsed into a FROM statement.
        dialect (str): the dialect used to parse the input expressions.
        **opts: other options to use to parse the input expressions.

    Returns:
        Update: the syntax tree for the UPDATE statement.
    """
    # Pass **opts here too, for consistency with every other parse in this builder.
    update = Update(this=maybe_parse(table, into=Table, dialect=dialect, **opts))
    update.set(
        "expressions",
        [
            # Each SET item becomes `col = literal`, with Python values converted
            # to SQL literals (None -> NULL, str -> quoted string, ...).
            EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v))
            for k, v in properties.items()
        ],
    )
    if from_:
        update.set(
            "from",
            maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts),
        )
    if isinstance(where, Condition):
        # Pre-built condition nodes just get wrapped; maybe_parse returns them as-is.
        where = Where(this=where)
    if where:
        update.set(
            "where",
            maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
        )
    return update
+3905
+3906
def delete(table, where=None, dialect=None, **opts) -> Delete:
    """
    Builds a delete statement.

    Example:
        >>> delete("my_table", where="id > 1").sql()
        'DELETE FROM my_table WHERE id > 1'

    Args:
        table (str | Expression): the table to delete from.
        where (str | Condition): sql conditional parsed into a WHERE statement;
            when omitted, no WHERE clause is attached.
        dialect (str): the dialect used to parse the input expressions.
        **opts: other options to use to parse the input expressions.

    Returns:
        Delete: the syntax tree for the DELETE statement.
    """
    delete_expr = Delete(this=maybe_parse(table, into=Table, dialect=dialect, **opts))
    # Only attach a WHERE clause when one was given. Previously, `where=None`
    # fell through to maybe_parse(None, ...), which stringified None and parsed
    # a literal "WHERE None" clause into the tree.
    if where is not None:
        delete_expr.set(
            "where",
            Where(this=where)
            if isinstance(where, Condition)
            else maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
        )
    return delete_expr
+3929
+3930
def condition(expression, dialect=None, **opts) -> Condition:
    """
    Initialize a logical condition expression.

    Example:
        >>> condition("x=1").sql()
        'x = 1'

        This is helpful for composing larger logical syntax trees:
        >>> where = condition("x=1")
        >>> where = where.and_("y=1")
        >>> Select().from_("tbl").select("*").where(where).sql()
        'SELECT * FROM tbl WHERE x = 1 AND y = 1'

    Args:
        expression (str | Expression): the SQL code string to parse;
            an Expression instance is used as-is.
        dialect (str): the dialect used to parse the input expression (when the
            input expression is a SQL string).
        **opts: other options to use to parse the input expressions (again, when
            the input expression is a SQL string).

    Returns:
        Condition: the expression
    """
    parsed = maybe_parse(expression, into=Condition, dialect=dialect, **opts)
    return parsed  # type: ignore
+3962
+3963
def and_(*expressions, dialect=None, **opts) -> And:
    """
    Combine multiple conditions with an AND logical operator.

    Example:
        >>> and_("x=1", and_("y=1", "z=1")).sql()
        'x = 1 AND (y = 1 AND z = 1)'

    Args:
        *expressions (str | Expression): the SQL code strings to parse;
            Expression instances are used as-is.
        dialect (str): the dialect used to parse the input expression.
        **opts: other options to use to parse the input expressions.

    Returns:
        And: the new condition
    """
    return _combine(expressions, And, dialect=dialect, **opts)
+3982
+3983
def or_(*expressions, dialect=None, **opts) -> Or:
    """
    Combine multiple conditions with an OR logical operator.

    Example:
        >>> or_("x=1", or_("y=1", "z=1")).sql()
        'x = 1 OR (y = 1 OR z = 1)'

    Args:
        *expressions (str | Expression): the SQL code strings to parse;
            Expression instances are used as-is.
        dialect (str): the dialect used to parse the input expression.
        **opts: other options to use to parse the input expressions.

    Returns:
        Or: the new condition
    """
    return _combine(expressions, Or, dialect=dialect, **opts)
+4002
+4003
def not_(expression, dialect=None, **opts) -> Not:
    """
    Wrap a condition with a NOT operator.

    Example:
        >>> not_("this_suit='black'").sql()
        "NOT this_suit = 'black'"

    Args:
        expression (str | Expression): the SQL code string to parse;
            an Expression instance is used as-is.
        dialect (str): the dialect used to parse the input expression.
        **opts: other options to use to parse the input expressions.

    Returns:
        Not: the new condition
    """
    parsed = condition(expression, dialect=dialect, **opts)
    # Parenthesize logical sub-expressions so NOT binds to the whole condition.
    return Not(this=_wrap_operator(parsed))
+4027
+4028
def paren(expression) -> Paren:
    """Wrap *expression* in a Paren node (explicit parentheses in generated SQL)."""
    return Paren(this=expression)
+4031
+4032
+4033SAFE_IDENTIFIER_RE = re.compile(r"^[_a-zA-Z][\w]*$")
+4034
+4035
@t.overload
def to_identifier(name: None, quoted: t.Optional[bool] = None) -> None:
    ...


@t.overload
def to_identifier(name: str | Identifier, quoted: t.Optional[bool] = None) -> Identifier:
    ...


def to_identifier(name, quoted=None):
    """Builds an identifier.

    Args:
        name: the name to turn into an identifier; None passes through unchanged.
        quoted: force quoting on/off; by default only unsafe names are quoted.

    Returns:
        The identifier ast node (or None when *name* is None).
    """
    if name is None:
        return None

    if isinstance(name, Identifier):
        return name
    if not isinstance(name, str):
        raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}")

    # Quote automatically unless the caller decided, and the name is "safe".
    needs_quoting = quoted if quoted is not None else not re.match(SAFE_IDENTIFIER_RE, name)
    return Identifier(this=name, quoted=needs_quoting)
+4070
+4071
# Matches an integer count followed by a unit word, e.g. "1 day" or " 5  months ".
INTERVAL_STRING_RE = re.compile(r"\s*([0-9]+)\s*([a-zA-Z]+)\s*")


def to_interval(interval: str | Literal) -> Interval:
    """Builds an interval expression from a string like '1 day' or '5 months'."""
    if isinstance(interval, Literal):
        # Only string literals carry an interval spec; unwrap to the raw text.
        if not interval.is_string:
            raise ValueError("Invalid interval string.")
        interval = interval.this

    match = INTERVAL_STRING_RE.match(interval)  # type: ignore
    if not match:
        raise ValueError("Invalid interval string.")

    amount, unit = match.groups()
    return Interval(this=Literal.string(amount), unit=Var(this=unit))
+4092
+4093
@t.overload
def to_table(sql_path: str | Table, **kwargs) -> Table:
    ...


@t.overload
def to_table(sql_path: None, **kwargs) -> None:
    ...


def to_table(sql_path: t.Optional[str | Table], **kwargs) -> t.Optional[Table]:
    """
    Create a table expression from a `[catalog].[schema].[table]` sql path.
    Catalog and schema are optional. A Table instance (or None) is returned as-is.

    Args:
        sql_path: a `[catalog].[schema].[table]` string, a Table, or None.

    Returns:
        A table expression (or None).
    """
    if sql_path is None or isinstance(sql_path, Table):
        return sql_path
    if not isinstance(sql_path, str):
        raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")

    # Missing leading parts come back as None from split_num_words.
    parts = split_num_words(sql_path, ".", 3)
    catalog, db, table_name = (to_identifier(part) for part in parts)
    return Table(this=table_name, db=db, catalog=catalog, **kwargs)
+4122
+4123
def to_column(sql_path: str | Column, **kwargs) -> Column:
    """
    Create a column from a `[table].[column]` sql path. The table part is optional.

    If a Column is passed in then that column is returned. Note that, despite the
    annotation, ``None`` also passes through unchanged.

    Args:
        sql_path: `[table].[column]` string
    Returns:
        Column: a column expression
    """
    if sql_path is None or isinstance(sql_path, Column):
        return sql_path
    if not isinstance(sql_path, str):
        raise ValueError(f"Invalid type provided for column: {type(sql_path)}")
    # A bare "column" string yields table_name=None from split_num_words.
    table_name, column_name = (to_identifier(x) for x in split_num_words(sql_path, ".", 2))
    return Column(this=column_name, table=table_name, **kwargs)
+4141
+4142
def alias_(
    expression: str | Expression,
    alias: str | Identifier,
    table: bool | t.Sequence[str | Identifier] = False,
    quoted: t.Optional[bool] = None,
    dialect: DialectType = None,
    **opts,
):
    """Create an Alias expression.

    Example:
        >>> alias_('foo', 'bar').sql()
        'foo AS bar'

        >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()
        '(SELECT 1, 2) AS bar(a, b)'

    Args:
        expression: the SQL code string to parse;
            an Expression instance is used as-is.
        alias: the alias name to use; quoted automatically when it contains
            special characters.
        table: whether or not to create a table alias; may also be a list of columns.
        quoted: whether or not to quote the alias.
        dialect: the dialect used to parse the input expression.
        **opts: other options to use to parse the input expressions.

    Returns:
        Alias: the aliased expression
    """
    parsed = maybe_parse(expression, dialect=dialect, **opts)
    alias_node = to_identifier(alias, quoted=quoted)

    if table:
        table_alias = TableAlias(this=alias_node)
        parsed.set("alias", table_alias)

        if not isinstance(table, bool):
            for column in table:
                table_alias.append("columns", to_identifier(column, quoted=quoted))

        return parsed

    # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in
    # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node
    # for the complete Window expression.
    #
    # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls

    if "alias" in parsed.arg_types and not isinstance(parsed, Window):
        parsed = parsed.copy()
        parsed.set("alias", alias_node)
        return parsed
    return Alias(this=parsed, alias=alias_node)
+4197
+4198
def subquery(expression, alias=None, dialect=None, **opts):
    """
    Build a subquery expression.

    Example:
        >>> subquery('select x from tbl', 'bar').select('x').sql()
        'SELECT x FROM (SELECT x FROM tbl) AS bar'

    Args:
        expression (str | Expression): the SQL code string to parse;
            an Expression instance is used as-is.
        alias (str | Expression): the alias name to use.
        dialect (str): the dialect used to parse the input expression.
        **opts: other options to use to parse the input expressions.

    Returns:
        Select: a new select with the subquery expression included
    """
    sub = maybe_parse(expression, dialect=dialect, **opts).subquery(alias)
    return Select().from_(sub, dialect=dialect, **opts)
+4220
+4221
def column(col, table=None, quoted=None) -> Column:
    """
    Build a Column.

    Args:
        col (str | Expression): column name
        table (str | Expression): optional table name
        quoted (bool): whether to force-quote the identifiers
    Returns:
        Column: column instance
    """
    col_id = to_identifier(col, quoted=quoted)
    table_id = to_identifier(table, quoted=quoted)
    return Column(this=col_id, table=table_id)
+4236
+4237
def cast(expression: str | Expression, to: str | DataType | DataType.Type, **opts) -> Cast:
    """Cast an expression to a data type.

    Example:
        >>> cast('x + 1', 'int').sql()
        'CAST(x + 1 AS INT)'

    Args:
        expression: The expression to cast.
        to: The datatype to cast to.

    Returns:
        A cast node.
    """
    parsed = maybe_parse(expression, **opts)
    target_type = DataType.build(to, **opts)
    return Cast(this=parsed, to=target_type)
+4254
+4255
def table_(table, db=None, catalog=None, quoted=None, alias=None) -> Table:
    """Build a Table.

    Args:
        table (str | Expression): table name
        db (str | Expression): optional db name
        catalog (str | Expression): optional catalog name
        quoted (bool): whether to force-quote the identifiers
        alias (str): optional table alias

    Returns:
        Table: table instance
    """
    alias_node = TableAlias(this=to_identifier(alias)) if alias else None
    return Table(
        this=to_identifier(table, quoted=quoted),
        db=to_identifier(db, quoted=quoted),
        catalog=to_identifier(catalog, quoted=quoted),
        alias=alias_node,
    )
+4273
+4274
+4275def values(
+4276    values: t.Iterable[t.Tuple[t.Any, ...]],
+4277    alias: t.Optional[str] = None,
+4278    columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None,
+4279) -> Values:
+4280    """Build VALUES statement.
+4281
+4282    Example:
+4283        >>> values([(1, '2')]).sql()
+4284        "VALUES (1, '2')"
+4285
+4286    Args:
+4287        values: values statements that will be converted to SQL
+4288        alias: optional alias
+4289        columns: Optional list of ordered column names or ordered dictionary of column names to types.
+4290         If either are provided then an alias is also required.
+4291         If a dictionary is provided then the first column of the values will be casted to the expected type
+4292         in order to help with type inference.
+4293
+4294    Returns:
+4295        Values: the Values expression object
+4296    """
+4297    if columns and not alias:
+4298        raise ValueError("Alias is required when providing columns")
+4299    table_alias = (
+4300        TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns])
+4301        if columns
+4302        else TableAlias(this=to_identifier(alias) if alias else None)
+4303    )
+4304    expressions = [convert(tup) for tup in values]
+4305    if columns and isinstance(columns, dict):
+4306        types = list(columns.values())
+4307        expressions[0].set(
+4308            "expressions",
+4309            [cast(x, types[i]) for i, x in enumerate(expressions[0].expressions)],
+4310        )
+4311    return Values(
+4312        expressions=expressions,
+4313        alias=table_alias,
+4314    )
+4315
+4316
+4317def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable:
+4318    """Build ALTER TABLE... RENAME... expression
+4319
+4320    Args:
+4321        old_name: The old name of the table
+4322        new_name: The new name of the table
+4323
+4324    Returns:
+4325        Alter table expression
+4326    """
+4327    old_table = to_table(old_name)
+4328    new_table = to_table(new_name)
+4329    return AlterTable(
+4330        this=old_table,
+4331        actions=[
+4332            RenameTable(this=new_table),
+4333        ],
+4334    )
+4335
+4336
+4337def convert(value) -> Expression:
+4338    """Convert a python value into an expression object.
+4339
+4340    Raises an error if a conversion is not possible.
+4341
+4342    Args:
+4343        value (Any): a python object
+4344
+4345    Returns:
+4346        Expression: the equivalent expression object
+4347    """
+4348    if isinstance(value, Expression):
+4349        return value
+4350    if value is None:
+4351        return NULL
+4352    if isinstance(value, bool):
+4353        return Boolean(this=value)
+4354    if isinstance(value, str):
+4355        return Literal.string(value)
+4356    if isinstance(value, float) and math.isnan(value):
+4357        return NULL
+4358    if isinstance(value, numbers.Number):
+4359        return Literal.number(value)
+4360    if isinstance(value, tuple):
+4361        return Tuple(expressions=[convert(v) for v in value])
+4362    if isinstance(value, list):
+4363        return Array(expressions=[convert(v) for v in value])
+4364    if isinstance(value, dict):
+4365        return Map(
+4366            keys=[convert(k) for k in value],
+4367            values=[convert(v) for v in value.values()],
+4368        )
+4369    if isinstance(value, datetime.datetime):
+4370        datetime_literal = Literal.string(
+4371            (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat()
+4372        )
+4373        return TimeStrToTime(this=datetime_literal)
+4374    if isinstance(value, datetime.date):
+4375        date_literal = Literal.string(value.strftime("%Y-%m-%d"))
+4376        return DateStrToDate(this=date_literal)
+4377    raise ValueError(f"Cannot convert {value}")
+4378
+4379
+4380def replace_children(expression, fun):
+4381    """
+4382    Replace children of an expression with the result of a lambda fun(child) -> exp.
+4383    """
+4384    for k, v in expression.args.items():
+4385        is_list_arg = isinstance(v, list)
+4386
+4387        child_nodes = v if is_list_arg else [v]
+4388        new_child_nodes = []
+4389
+4390        for cn in child_nodes:
+4391            if isinstance(cn, Expression):
+4392                for child_node in ensure_collection(fun(cn)):
+4393                    new_child_nodes.append(child_node)
+4394                    child_node.parent = expression
+4395                    child_node.arg_key = k
+4396            else:
+4397                new_child_nodes.append(cn)
+4398
+4399        expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0)
+4400
+4401
+4402def column_table_names(expression):
+4403    """
+4404    Return all table names referenced through columns in an expression.
+4405
+4406    Example:
+4407        >>> import sqlglot
+4408        >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))
+4409        ['c', 'a']
+4410
+4411    Args:
+4412        expression (sqlglot.Expression): expression to find table names
+4413
+4414    Returns:
+4415        list: A list of unique names
+4416    """
+4417    return list(dict.fromkeys(column.table for column in expression.find_all(Column)))
+4418
+4419
+4420def table_name(table) -> str:
+4421    """Get the full name of a table as a string.
+4422
+4423    Args:
+4424        table (exp.Table | str): table expression node or string.
+4425
+4426    Examples:
+4427        >>> from sqlglot import exp, parse_one
+4428        >>> table_name(parse_one("select * from a.b.c").find(exp.Table))
+4429        'a.b.c'
+4430
+4431    Returns:
+4432        The table name.
+4433    """
+4434
+4435    table = maybe_parse(table, into=Table)
+4436
+4437    if not table:
+4438        raise ValueError(f"Cannot parse {table}")
+4439
+4440    return ".".join(
+4441        part
+4442        for part in (
+4443            table.text("catalog"),
+4444            table.text("db"),
+4445            table.name,
+4446        )
+4447        if part
+4448    )
+4449
+4450
+4451def replace_tables(expression, mapping):
+4452    """Replace all tables in expression according to the mapping.
+4453
+4454    Args:
+4455        expression (sqlglot.Expression): expression node to be transformed and replaced.
+4456        mapping (Dict[str, str]): mapping of table names.
+4457
+4458    Examples:
+4459        >>> from sqlglot import exp, parse_one
+4460        >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
+4461        'SELECT * FROM c'
+4462
+4463    Returns:
+4464        The mapped expression.
+4465    """
+4466
+4467    def _replace_tables(node):
+4468        if isinstance(node, Table):
+4469            new_name = mapping.get(table_name(node))
+4470            if new_name:
+4471                return to_table(
+4472                    new_name,
+4473                    **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")},
+4474                )
+4475        return node
+4476
+4477    return expression.transform(_replace_tables)
+4478
+4479
+4480def replace_placeholders(expression, *args, **kwargs):
+4481    """Replace placeholders in an expression.
+4482
+4483    Args:
+4484        expression (sqlglot.Expression): expression node to be transformed and replaced.
+4485        args: positional names that will substitute unnamed placeholders in the given order.
+4486        kwargs: keyword arguments that will substitute named placeholders.
+4487
+4488    Examples:
+4489        >>> from sqlglot import exp, parse_one
+4490        >>> replace_placeholders(
+4491        ...     parse_one("select * from :tbl where ? = ?"), "a", "b", tbl="foo"
+4492        ... ).sql()
+4493        'SELECT * FROM foo WHERE a = b'
+4494
+4495    Returns:
+4496        The mapped expression.
+4497    """
+4498
+4499    def _replace_placeholders(node, args, **kwargs):
+4500        if isinstance(node, Placeholder):
+4501            if node.name:
+4502                new_name = kwargs.get(node.name)
+4503                if new_name:
+4504                    return to_identifier(new_name)
+4505            else:
+4506                try:
+4507                    return to_identifier(next(args))
+4508                except StopIteration:
+4509                    pass
+4510        return node
+4511
+4512    return expression.transform(_replace_placeholders, iter(args), **kwargs)
+4513
+4514
+4515def expand(expression: Expression, sources: t.Dict[str, Subqueryable], copy=True) -> Expression:
+4516    """Transforms an expression by expanding all referenced sources into subqueries.
+4517
+4518    Examples:
+4519        >>> from sqlglot import parse_one
+4520        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
+4521        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
+4522
+4523    Args:
+4524        expression: The expression to expand.
+4525        sources: A dictionary of name to Subqueryables.
+4526        copy: Whether or not to copy the expression during transformation. Defaults to True.
+4527
+4528    Returns:
+4529        The transformed expression.
+4530    """
+4531
+4532    def _expand(node: Expression):
+4533        if isinstance(node, Table):
+4534            name = table_name(node)
+4535            source = sources.get(name)
+4536            if source:
+4537                subquery = source.subquery(node.alias or name)
+4538                subquery.comments = [f"source: {name}"]
+4539                return subquery
+4540        return node
+4541
+4542    return expression.transform(_expand, copy=copy)
+4543
+4544
+4545def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func:
+4546    """
+4547    Returns a Func expression.
+4548
+4549    Examples:
+4550        >>> func("abs", 5).sql()
+4551        'ABS(5)'
+4552
+4553        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
+4554        'CAST(5 AS DOUBLE)'
+4555
+4556    Args:
+4557        name: the name of the function to build.
+4558        args: the args used to instantiate the function of interest.
+4559        dialect: the source dialect.
+4560        kwargs: the kwargs used to instantiate the function of interest.
+4561
+4562    Note:
+4563        The arguments `args` and `kwargs` are mutually exclusive.
+4564
+4565    Returns:
+4566        An instance of the function of interest, or an anonymous function, if `name` doesn't
+4567        correspond to an existing `sqlglot.expressions.Func` class.
+4568    """
+4569    if args and kwargs:
+4570        raise ValueError("Can't use both args and kwargs to instantiate a function.")
+4571
+4572    from sqlglot.dialects.dialect import Dialect
+4573
+4574    args = tuple(convert(arg) for arg in args)
+4575    kwargs = {key: convert(value) for key, value in kwargs.items()}
+4576
+4577    parser = Dialect.get_or_raise(dialect)().parser()
+4578    from_args_list = parser.FUNCTIONS.get(name.upper())
+4579
+4580    if from_args_list:
+4581        function = from_args_list(args) if args else from_args_list.__self__(**kwargs)  # type: ignore
+4582    else:
+4583        kwargs = kwargs or {"expressions": args}
+4584        function = Anonymous(this=name, **kwargs)
+4585
+4586    for error_message in function.error_messages(args):
+4587        raise ValueError(error_message)
+4588
+4589    return function
+4590
+4591
+4592def true():
+4593    """
+4594    Returns a true Boolean expression.
+4595    """
+4596    return Boolean(this=True)
+4597
+4598
+4599def false():
+4600    """
+4601    Returns a false Boolean expression.
+4602    """
+4603    return Boolean(this=False)
+4604
+4605
+4606def null():
+4607    """
+4608    Returns a Null expression.
+4609    """
+4610    return Null()
+4611
+4612
+4613# TODO: deprecate this
+4614TRUE = Boolean(this=True)
+4615FALSE = Boolean(this=False)
+4616NULL = Null()
+
+ + +
+
+ +
+ + class + Expression: + + + +
+ +
 54class Expression(metaclass=_Expression):
+ 55    """
+ 56    The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary
+ 57    context, such as its child expressions, their names (arg keys), and whether a given child expression
+ 58    is optional or not.
+ 59
+ 60    Attributes:
+ 61        key: a unique key for each class in the Expression hierarchy. This is useful for hashing
+ 62            and representing expressions as strings.
+ 63        arg_types: determines what arguments (child nodes) are supported by an expression. It
+ 64            maps arg keys to booleans that indicate whether the corresponding args are optional.
+ 65
+ 66    Example:
+ 67        >>> class Foo(Expression):
+ 68        ...     arg_types = {"this": True, "expression": False}
+ 69
+ 70        The above definition informs us that Foo is an Expression that requires an argument called
+ 71        "this" and may also optionally receive an argument called "expression".
+ 72
+ 73    Args:
+ 74        args: a mapping used for retrieving the arguments of an expression, given their arg keys.
+ 75        parent: a reference to the parent expression (or None, in case of root expressions).
+ 76        arg_key: the arg key an expression is associated with, i.e. the name its parent expression
+ 77            uses to refer to it.
+ 78        comments: a list of comments that are associated with a given expression. This is used in
+ 79            order to preserve comments when transpiling SQL code.
+ 80        _type: the `sqlglot.expressions.DataType` type of an expression. This is inferred by the
+ 81            optimizer, in order to enable some transformations that require type information.
+ 82    """
+ 83
+ 84    key = "expression"
+ 85    arg_types = {"this": True}
+ 86    __slots__ = ("args", "parent", "arg_key", "comments", "_type")
+ 87
+ 88    def __init__(self, **args: t.Any):
+ 89        self.args: t.Dict[str, t.Any] = args
+ 90        self.parent: t.Optional[Expression] = None
+ 91        self.arg_key: t.Optional[str] = None
+ 92        self.comments: t.Optional[t.List[str]] = None
+ 93        self._type: t.Optional[DataType] = None
+ 94
+ 95        for arg_key, value in self.args.items():
+ 96            self._set_parent(arg_key, value)
+ 97
+ 98    def __eq__(self, other) -> bool:
+ 99        return type(self) is type(other) and _norm_args(self) == _norm_args(other)
+100
+101    def __hash__(self) -> int:
+102        return hash(
+103            (
+104                self.key,
+105                tuple(
+106                    (k, tuple(v) if isinstance(v, list) else v) for k, v in _norm_args(self).items()
+107                ),
+108            )
+109        )
+110
+111    @property
+112    def this(self):
+113        """
+114        Retrieves the argument with key "this".
+115        """
+116        return self.args.get("this")
+117
+118    @property
+119    def expression(self):
+120        """
+121        Retrieves the argument with key "expression".
+122        """
+123        return self.args.get("expression")
+124
+125    @property
+126    def expressions(self):
+127        """
+128        Retrieves the argument with key "expressions".
+129        """
+130        return self.args.get("expressions") or []
+131
+132    def text(self, key):
+133        """
+134        Returns a textual representation of the argument corresponding to "key". This can only be used
+135        for args that are strings or leaf Expression instances, such as identifiers and literals.
+136        """
+137        field = self.args.get(key)
+138        if isinstance(field, str):
+139            return field
+140        if isinstance(field, (Identifier, Literal, Var)):
+141            return field.this
+142        if isinstance(field, (Star, Null)):
+143            return field.name
+144        return ""
+145
+146    @property
+147    def is_string(self):
+148        """
+149        Checks whether a Literal expression is a string.
+150        """
+151        return isinstance(self, Literal) and self.args["is_string"]
+152
+153    @property
+154    def is_number(self):
+155        """
+156        Checks whether a Literal expression is a number.
+157        """
+158        return isinstance(self, Literal) and not self.args["is_string"]
+159
+160    @property
+161    def is_int(self):
+162        """
+163        Checks whether a Literal expression is an integer.
+164        """
+165        if self.is_number:
+166            try:
+167                int(self.name)
+168                return True
+169            except ValueError:
+170                pass
+171        return False
+172
+173    @property
+174    def alias(self):
+175        """
+176        Returns the alias of the expression, or an empty string if it's not aliased.
+177        """
+178        if isinstance(self.args.get("alias"), TableAlias):
+179            return self.args["alias"].name
+180        return self.text("alias")
+181
+182    @property
+183    def name(self) -> str:
+184        return self.text("this")
+185
+186    @property
+187    def alias_or_name(self):
+188        return self.alias or self.name
+189
+190    @property
+191    def output_name(self):
+192        """
+193        Name of the output column if this expression is a selection.
+194
+195        If the Expression has no output name, an empty string is returned.
+196
+197        Example:
+198            >>> from sqlglot import parse_one
+199            >>> parse_one("SELECT a").expressions[0].output_name
+200            'a'
+201            >>> parse_one("SELECT b AS c").expressions[0].output_name
+202            'c'
+203            >>> parse_one("SELECT 1 + 2").expressions[0].output_name
+204            ''
+205        """
+206        return ""
+207
+208    @property
+209    def type(self) -> t.Optional[DataType]:
+210        return self._type
+211
+212    @type.setter
+213    def type(self, dtype: t.Optional[DataType | DataType.Type | str]) -> None:
+214        if dtype and not isinstance(dtype, DataType):
+215            dtype = DataType.build(dtype)
+216        self._type = dtype  # type: ignore
+217
+218    def __deepcopy__(self, memo):
+219        copy = self.__class__(**deepcopy(self.args))
+220        copy.comments = self.comments
+221        copy.type = self.type
+222        return copy
+223
+224    def copy(self):
+225        """
+226        Returns a deep copy of the expression.
+227        """
+228        new = deepcopy(self)
+229        new.parent = self.parent
+230        for item, parent, _ in new.bfs():
+231            if isinstance(item, Expression) and parent:
+232                item.parent = parent
+233        return new
+234
+235    def append(self, arg_key, value):
+236        """
+237        Appends value to arg_key if it's a list or sets it as a new list.
+238
+239        Args:
+240            arg_key (str): name of the list expression arg
+241            value (Any): value to append to the list
+242        """
+243        if not isinstance(self.args.get(arg_key), list):
+244            self.args[arg_key] = []
+245        self.args[arg_key].append(value)
+246        self._set_parent(arg_key, value)
+247
+248    def set(self, arg_key, value):
+249        """
+250        Sets `arg_key` to `value`.
+251
+252        Args:
+253            arg_key (str): name of the expression arg.
+254            value: value to set the arg to.
+255        """
+256        self.args[arg_key] = value
+257        self._set_parent(arg_key, value)
+258
+259    def _set_parent(self, arg_key, value):
+260        if isinstance(value, Expression):
+261            value.parent = self
+262            value.arg_key = arg_key
+263        elif isinstance(value, list):
+264            for v in value:
+265                if isinstance(v, Expression):
+266                    v.parent = self
+267                    v.arg_key = arg_key
+268
+269    @property
+270    def depth(self):
+271        """
+272        Returns the depth of this tree.
+273        """
+274        if self.parent:
+275            return self.parent.depth + 1
+276        return 0
+277
+278    def find(self, *expression_types, bfs=True):
+279        """
+280        Returns the first node in this tree which matches at least one of
+281        the specified types.
+282
+283        Args:
+284            expression_types (type): the expression type(s) to match.
+285
+286        Returns:
+287            The node which matches the criteria or None if no such node was found.
+288        """
+289        return next(self.find_all(*expression_types, bfs=bfs), None)
+290
+291    def find_all(self, *expression_types, bfs=True):
+292        """
+293        Returns a generator object which visits all nodes in this tree and only
+294        yields those that match at least one of the specified expression types.
+295
+296        Args:
+297            expression_types (type): the expression type(s) to match.
+298
+299        Returns:
+300            The generator object.
+301        """
+302        for expression, _, _ in self.walk(bfs=bfs):
+303            if isinstance(expression, expression_types):
+304                yield expression
+305
+306    def find_ancestor(self, *expression_types):
+307        """
+308        Returns a nearest parent matching expression_types.
+309
+310        Args:
+311            expression_types (type): the expression type(s) to match.
+312
+313        Returns:
+314            The parent node.
+315        """
+316        ancestor = self.parent
+317        while ancestor and not isinstance(ancestor, expression_types):
+318            ancestor = ancestor.parent
+319        return ancestor
+320
+321    @property
+322    def parent_select(self):
+323        """
+324        Returns the parent select statement.
+325        """
+326        return self.find_ancestor(Select)
+327
+328    def walk(self, bfs=True, prune=None):
+329        """
+330        Returns a generator object which visits all nodes in this tree.
+331
+332        Args:
+333            bfs (bool): if set to True the BFS traversal order will be applied,
+334                otherwise the DFS traversal will be used instead.
+335            prune ((node, parent, arg_key) -> bool): callable that returns True if
+336                the generator should stop traversing this branch of the tree.
+337
+338        Returns:
+339            the generator object.
+340        """
+341        if bfs:
+342            yield from self.bfs(prune=prune)
+343        else:
+344            yield from self.dfs(prune=prune)
+345
+346    def dfs(self, parent=None, key=None, prune=None):
+347        """
+348        Returns a generator object which visits all nodes in this tree in
+349        the DFS (Depth-first) order.
+350
+351        Returns:
+352            The generator object.
+353        """
+354        parent = parent or self.parent
+355        yield self, parent, key
+356        if prune and prune(self, parent, key):
+357            return
+358
+359        for k, v in self.args.items():
+360            for node in ensure_collection(v):
+361                if isinstance(node, Expression):
+362                    yield from node.dfs(self, k, prune)
+363
+364    def bfs(self, prune=None):
+365        """
+366        Returns a generator object which visits all nodes in this tree in
+367        the BFS (Breadth-first) order.
+368
+369        Returns:
+370            The generator object.
+371        """
+372        queue = deque([(self, self.parent, None)])
+373
+374        while queue:
+375            item, parent, key = queue.popleft()
+376
+377            yield item, parent, key
+378            if prune and prune(item, parent, key):
+379                continue
+380
+381            if isinstance(item, Expression):
+382                for k, v in item.args.items():
+383                    for node in ensure_collection(v):
+384                        if isinstance(node, Expression):
+385                            queue.append((node, item, k))
+386
+387    def unnest(self):
+388        """
+389        Returns the first non parenthesis child or self.
+390        """
+391        expression = self
+392        while isinstance(expression, Paren):
+393            expression = expression.this
+394        return expression
+395
+396    def unalias(self):
+397        """
+398        Returns the inner expression if this is an Alias.
+399        """
+400        if isinstance(self, Alias):
+401            return self.this
+402        return self
+403
+404    def unnest_operands(self):
+405        """
+406        Returns unnested operands as a tuple.
+407        """
+408        return tuple(arg.unnest() for arg in self.args.values() if arg)
+409
+410    def flatten(self, unnest=True):
+411        """
+412        Returns a generator which yields child nodes who's parents are the same class.
+413
+414        A AND B AND C -> [A, B, C]
+415        """
+416        for node, _, _ in self.dfs(prune=lambda n, p, *_: p and not isinstance(n, self.__class__)):
+417            if not isinstance(node, self.__class__):
+418                yield node.unnest() if unnest else node
+419
+420    def __str__(self):
+421        return self.sql()
+422
+423    def __repr__(self):
+424        return self._to_s()
+425
+426    def sql(self, dialect: DialectType = None, **opts) -> str:
+427        """
+428        Returns SQL string representation of this tree.
+429
+430        Args:
+431            dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
+432            opts: other `sqlglot.generator.Generator` options.
+433
+434        Returns:
+435            The SQL string.
+436        """
+437        from sqlglot.dialects import Dialect
+438
+439        return Dialect.get_or_raise(dialect)().generate(self, **opts)
+440
+441    def _to_s(self, hide_missing: bool = True, level: int = 0) -> str:
+442        indent = "" if not level else "\n"
+443        indent += "".join(["  "] * level)
+444        left = f"({self.key.upper()} "
+445
+446        args: t.Dict[str, t.Any] = {
+447            k: ", ".join(
+448                v._to_s(hide_missing=hide_missing, level=level + 1)
+449                if hasattr(v, "_to_s")
+450                else str(v)
+451                for v in ensure_collection(vs)
+452                if v is not None
+453            )
+454            for k, vs in self.args.items()
+455        }
+456        args["comments"] = self.comments
+457        args["type"] = self.type
+458        args = {k: v for k, v in args.items() if v or not hide_missing}
+459
+460        right = ", ".join(f"{k}: {v}" for k, v in args.items())
+461        right += ")"
+462
+463        return indent + left + right
+464
+465    def transform(self, fun, *args, copy=True, **kwargs):
+466        """
+467        Recursively visits all tree nodes (excluding already transformed ones)
+468        and applies the given transformation function to each node.
+469
+470        Args:
+471            fun (function): a function which takes a node as an argument and returns a
+472                new transformed node or the same node without modifications. If the function
+473                returns None, then the corresponding node will be removed from the syntax tree.
+474            copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
+475                modified in place.
+476
+477        Returns:
+478            The transformed tree.
+479        """
+480        node = self.copy() if copy else self
+481        new_node = fun(node, *args, **kwargs)
+482
+483        if new_node is None or not isinstance(new_node, Expression):
+484            return new_node
+485        if new_node is not node:
+486            new_node.parent = node.parent
+487            return new_node
+488
+489        replace_children(new_node, lambda child: child.transform(fun, *args, copy=False, **kwargs))
+490        return new_node
+491
+492    def replace(self, expression):
+493        """
+494        Swap out this expression with a new expression.
+495
+496        For example::
+497
+498            >>> tree = Select().select("x").from_("tbl")
+499            >>> tree.find(Column).replace(Column(this="y"))
+500            (COLUMN this: y)
+501            >>> tree.sql()
+502            'SELECT y FROM tbl'
+503
+504        Args:
+505            expression (Expression|None): new node
+506
+507        Returns:
+508            The new expression or expressions.
+509        """
+510        if not self.parent:
+511            return expression
+512
+513        parent = self.parent
+514        self.parent = None
+515
+516        replace_children(parent, lambda child: expression if child is self else child)
+517        return expression
+518
+519    def pop(self):
+520        """
+521        Remove this expression from its AST.
+522        """
+523        self.replace(None)
+524
+525    def assert_is(self, type_):
+526        """
+527        Assert that this `Expression` is an instance of `type_`.
+528
+529        If it is NOT an instance of `type_`, this raises an assertion error.
+530        Otherwise, this returns this expression.
+531
+532        Examples:
+533            This is useful for type security in chained expressions:
+534
+535            >>> import sqlglot
+536            >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
+537            'SELECT x, z FROM y'
+538        """
+539        assert isinstance(self, type_)
+540        return self
+541
+542    def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
+543        """
+544        Checks if this expression is valid (e.g. all mandatory args are set).
+545
+546        Args:
+547            args: a sequence of values that were used to instantiate a Func expression. This is used
+548                to check that the provided arguments don't exceed the function argument limit.
+549
+550        Returns:
+551            A list of error messages for all possible errors that were found.
+552        """
+553        errors: t.List[str] = []
+554
+555        for k in self.args:
+556            if k not in self.arg_types:
+557                errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
+558        for k, mandatory in self.arg_types.items():
+559            v = self.args.get(k)
+560            if mandatory and (v is None or (isinstance(v, list) and not v)):
+561                errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
+562
+563        if (
+564            args
+565            and isinstance(self, Func)
+566            and len(args) > len(self.arg_types)
+567            and not self.is_var_len_args
+568        ):
+569            errors.append(
+570                f"The number of provided arguments ({len(args)}) is greater than "
+571                f"the maximum number of supported arguments ({len(self.arg_types)})"
+572            )
+573
+574        return errors
+575
+576    def dump(self):
+577        """
+578        Dump this Expression to a JSON-serializable dict.
+579        """
+580        from sqlglot.serde import dump
+581
+582        return dump(self)
+583
+584    @classmethod
+585    def load(cls, obj):
+586        """
+587        Load a dict (as returned by `Expression.dump`) into an Expression instance.
+588        """
+589        from sqlglot.serde import load
+590
+591        return load(obj)
+
+ + +

The base class for all expressions in a syntax tree. Each Expression encapsulates any necessary +context, such as its child expressions, their names (arg keys), and whether a given child expression +is optional or not.

+ +
Attributes:
+ +
    +
  • key: a unique key for each class in the Expression hierarchy. This is useful for hashing +and representing expressions as strings.
  • +
  • arg_types: determines what arguments (child nodes) are supported by an expression. It +maps arg keys to booleans that indicate whether the corresponding args are optional.
  • +
+ +
Example:
+ +
+
+
>>> class Foo(Expression):
+...     arg_types = {"this": True, "expression": False}
+
+
+ +

The above definition informs us that Foo is an Expression that requires an argument called + "this" and may also optionally receive an argument called "expression".

+
+ +
Arguments:
+ +
    +
  • args: a mapping used for retrieving the arguments of an expression, given their arg keys.
  • +
  • parent: a reference to the parent expression (or None, in case of root expressions).
  • +
  • arg_key: the arg key an expression is associated with, i.e. the name its parent expression +uses to refer to it.
  • +
  • comments: a list of comments that are associated with a given expression. This is used in +order to preserve comments when transpiling SQL code.
  • +
  • _type: the sqlglot.expressions.DataType type of an expression. This is inferred by the +optimizer, in order to enable some transformations that require type information.
  • +
+
+ + +
+ +
+ + Expression(**args: Any) + + + +
+ +
88    def __init__(self, **args: t.Any):
+89        self.args: t.Dict[str, t.Any] = args
+90        self.parent: t.Optional[Expression] = None
+91        self.arg_key: t.Optional[str] = None
+92        self.comments: t.Optional[t.List[str]] = None
+93        self._type: t.Optional[DataType] = None
+94
+95        for arg_key, value in self.args.items():
+96            self._set_parent(arg_key, value)
+
+ + + + +
+
+
+ this + + +
+ + +

Retrieves the argument with key "this".

+
+ + +
+
+
+ expression + + +
+ + +

Retrieves the argument with key "expression".

+
+ + +
+
+
+ expressions + + +
+ + +

Retrieves the argument with key "expressions".

+
+ + +
+
+ +
+ + def + text(self, key): + + + +
+ +
132    def text(self, key):
+133        """
+134        Returns a textual representation of the argument corresponding to "key". This can only be used
+135        for args that are strings or leaf Expression instances, such as identifiers and literals.
+136        """
+137        field = self.args.get(key)
+138        if isinstance(field, str):
+139            return field
+140        if isinstance(field, (Identifier, Literal, Var)):
+141            return field.this
+142        if isinstance(field, (Star, Null)):
+143            return field.name
+144        return ""
+
+ + +

Returns a textual representation of the argument corresponding to "key". This can only be used +for args that are strings or leaf Expression instances, such as identifiers and literals.

+
+ + +
+
+
+ is_string + + +
+ + +

Checks whether a Literal expression is a string.

+
+ + +
+
+
+ is_number + + +
+ + +

Checks whether a Literal expression is a number.

+
+ + +
+
+
+ is_int + + +
+ + +

Checks whether a Literal expression is an integer.

+
+ + +
+
+
+ alias + + +
+ + +

Returns the alias of the expression, or an empty string if it's not aliased.

+
+ + +
+
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+
+ +
+ + def + copy(self): + + + +
+ +
224    def copy(self):
+225        """
+226        Returns a deep copy of the expression.
+227        """
+228        new = deepcopy(self)
+229        new.parent = self.parent
+230        for item, parent, _ in new.bfs():
+231            if isinstance(item, Expression) and parent:
+232                item.parent = parent
+233        return new
+
+ + +

Returns a deep copy of the expression.

+
+ + +
+
+ +
+ + def + append(self, arg_key, value): + + + +
+ +
235    def append(self, arg_key, value):
+236        """
+237        Appends value to arg_key if it's a list or sets it as a new list.
+238
+239        Args:
+240            arg_key (str): name of the list expression arg
+241            value (Any): value to append to the list
+242        """
+243        if not isinstance(self.args.get(arg_key), list):
+244            self.args[arg_key] = []
+245        self.args[arg_key].append(value)
+246        self._set_parent(arg_key, value)
+
+ + +

Appends value to arg_key if it's a list or sets it as a new list.

+ +
Arguments:
+ +
    +
  • arg_key (str): name of the list expression arg
  • +
  • value (Any): value to append to the list
  • +
+
+ + +
+
+ +
+ + def + set(self, arg_key, value): + + + +
+ +
248    def set(self, arg_key, value):
+249        """
+250        Sets `arg_key` to `value`.
+251
+252        Args:
+253            arg_key (str): name of the expression arg.
+254            value: value to set the arg to.
+255        """
+256        self.args[arg_key] = value
+257        self._set_parent(arg_key, value)
+
+ + +

Sets arg_key to value.

+ +
Arguments:
+ +
    +
  • arg_key (str): name of the expression arg.
  • +
  • value: value to set the arg to.
  • +
+
+ + +
+
+
+ depth + + +
+ + +

Returns the depth of this tree.

+
+ + +
+
+ +
+ + def + find(self, *expression_types, bfs=True): + + + +
+ +
278    def find(self, *expression_types, bfs=True):
+279        """
+280        Returns the first node in this tree which matches at least one of
+281        the specified types.
+282
+283        Args:
+284            expression_types (type): the expression type(s) to match.
+285
+286        Returns:
+287            The node which matches the criteria or None if no such node was found.
+288        """
+289        return next(self.find_all(*expression_types, bfs=bfs), None)
+
+ + +

Returns the first node in this tree which matches at least one of +the specified types.

+ +
Arguments:
+ +
    +
  • expression_types (type): the expression type(s) to match.
  • +
+ +
Returns:
+ +
+

The node which matches the criteria or None if no such node was found.

+
+
+ + +
+
+ +
+ + def + find_all(self, *expression_types, bfs=True): + + + +
+ +
291    def find_all(self, *expression_types, bfs=True):
+292        """
+293        Returns a generator object which visits all nodes in this tree and only
+294        yields those that match at least one of the specified expression types.
+295
+296        Args:
+297            expression_types (type): the expression type(s) to match.
+298
+299        Returns:
+300            The generator object.
+301        """
+302        for expression, _, _ in self.walk(bfs=bfs):
+303            if isinstance(expression, expression_types):
+304                yield expression
+
+ + +

Returns a generator object which visits all nodes in this tree and only +yields those that match at least one of the specified expression types.

+ +
Arguments:
+ +
    +
  • expression_types (type): the expression type(s) to match.
  • +
+ +
Returns:
+ +
+

The generator object.

+
+
+ + +
+
+ +
+ + def + find_ancestor(self, *expression_types): + + + +
+ +
306    def find_ancestor(self, *expression_types):
+307        """
+308        Returns a nearest parent matching expression_types.
+309
+310        Args:
+311            expression_types (type): the expression type(s) to match.
+312
+313        Returns:
+314            The parent node.
+315        """
+316        ancestor = self.parent
+317        while ancestor and not isinstance(ancestor, expression_types):
+318            ancestor = ancestor.parent
+319        return ancestor
+
+ + +

Returns the nearest parent matching expression_types.

+ +
Arguments:
+ +
    +
  • expression_types (type): the expression type(s) to match.
  • +
+ +
Returns:
+ +
+

The parent node.

+
+
+ + +
+
+
+ parent_select + + +
+ + +

Returns the parent select statement.

+
+ + +
+
+ +
+ + def + walk(self, bfs=True, prune=None): + + + +
+ +
328    def walk(self, bfs=True, prune=None):
+329        """
+330        Returns a generator object which visits all nodes in this tree.
+331
+332        Args:
+333            bfs (bool): if set to True the BFS traversal order will be applied,
+334                otherwise the DFS traversal will be used instead.
+335            prune ((node, parent, arg_key) -> bool): callable that returns True if
+336                the generator should stop traversing this branch of the tree.
+337
+338        Returns:
+339            the generator object.
+340        """
+341        if bfs:
+342            yield from self.bfs(prune=prune)
+343        else:
+344            yield from self.dfs(prune=prune)
+
+ + +

Returns a generator object which visits all nodes in this tree.

+ +
Arguments:
+ +
    +
  • bfs (bool): if set to True the BFS traversal order will be applied, +otherwise the DFS traversal will be used instead.
  • +
  • prune ((node, parent, arg_key) -> bool): callable that returns True if +the generator should stop traversing this branch of the tree.
  • +
+ +
Returns:
+ +
+

the generator object.

+
+
+ + +
+
+ +
+ + def + dfs(self, parent=None, key=None, prune=None): + + + +
+ +
346    def dfs(self, parent=None, key=None, prune=None):
+347        """
+348        Returns a generator object which visits all nodes in this tree in
+349        the DFS (Depth-first) order.
+350
+351        Returns:
+352            The generator object.
+353        """
+354        parent = parent or self.parent
+355        yield self, parent, key
+356        if prune and prune(self, parent, key):
+357            return
+358
+359        for k, v in self.args.items():
+360            for node in ensure_collection(v):
+361                if isinstance(node, Expression):
+362                    yield from node.dfs(self, k, prune)
+
+ + +

Returns a generator object which visits all nodes in this tree in +the DFS (Depth-first) order.

+ +
Returns:
+ +
+

The generator object.

+
+
+ + +
+
+ +
+ + def + bfs(self, prune=None): + + + +
+ +
364    def bfs(self, prune=None):
+365        """
+366        Returns a generator object which visits all nodes in this tree in
+367        the BFS (Breadth-first) order.
+368
+369        Returns:
+370            The generator object.
+371        """
+372        queue = deque([(self, self.parent, None)])
+373
+374        while queue:
+375            item, parent, key = queue.popleft()
+376
+377            yield item, parent, key
+378            if prune and prune(item, parent, key):
+379                continue
+380
+381            if isinstance(item, Expression):
+382                for k, v in item.args.items():
+383                    for node in ensure_collection(v):
+384                        if isinstance(node, Expression):
+385                            queue.append((node, item, k))
+
+ + +

Returns a generator object which visits all nodes in this tree in +the BFS (Breadth-first) order.

+ +
Returns:
+ +
+

The generator object.

+
+
+ + +
+
+ +
+ + def + unnest(self): + + + +
+ +
387    def unnest(self):
+388        """
+389        Returns the first non parenthesis child or self.
+390        """
+391        expression = self
+392        while isinstance(expression, Paren):
+393            expression = expression.this
+394        return expression
+
+ + +

Returns the first non-parenthesis child or self.

+
+ + +
+
+ +
+ + def + unalias(self): + + + +
+ +
396    def unalias(self):
+397        """
+398        Returns the inner expression if this is an Alias.
+399        """
+400        if isinstance(self, Alias):
+401            return self.this
+402        return self
+
+ + +

Returns the inner expression if this is an Alias.

+
+ + +
+
+ +
+ + def + unnest_operands(self): + + + +
+ +
404    def unnest_operands(self):
+405        """
+406        Returns unnested operands as a tuple.
+407        """
+408        return tuple(arg.unnest() for arg in self.args.values() if arg)
+
+ + +

Returns unnested operands as a tuple.

+
+ + +
+
+ +
+ + def + flatten(self, unnest=True): + + + +
+ +
410    def flatten(self, unnest=True):
+411        """
+412        Returns a generator which yields child nodes who's parents are the same class.
+413
+414        A AND B AND C -> [A, B, C]
+415        """
+416        for node, _, _ in self.dfs(prune=lambda n, p, *_: p and not isinstance(n, self.__class__)):
+417            if not isinstance(node, self.__class__):
+418                yield node.unnest() if unnest else node
+
+ + +

Returns a generator which yields child nodes whose parents are the same class.

+ +

A AND B AND C -> [A, B, C]

+
+ + +
+
+ +
+ + def + sql( self, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, **opts) -> str: + + + +
+ +
426    def sql(self, dialect: DialectType = None, **opts) -> str:
+427        """
+428        Returns SQL string representation of this tree.
+429
+430        Args:
+431            dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
+432            opts: other `sqlglot.generator.Generator` options.
+433
+434        Returns:
+435            The SQL string.
+436        """
+437        from sqlglot.dialects import Dialect
+438
+439        return Dialect.get_or_raise(dialect)().generate(self, **opts)
+
+ + +

Returns SQL string representation of this tree.

+ +
Arguments:
+ +
    +
  • dialect: the dialect of the output SQL string (eg. "spark", "hive", "presto", "mysql").
  • +
  • opts: other sqlglot.generator.Generator options.
  • +
+ +
Returns:
+ +
+

The SQL string.

+
+
+ + +
+
+ +
+ + def + transform(self, fun, *args, copy=True, **kwargs): + + + +
+ +
465    def transform(self, fun, *args, copy=True, **kwargs):
+466        """
+467        Recursively visits all tree nodes (excluding already transformed ones)
+468        and applies the given transformation function to each node.
+469
+470        Args:
+471            fun (function): a function which takes a node as an argument and returns a
+472                new transformed node or the same node without modifications. If the function
+473                returns None, then the corresponding node will be removed from the syntax tree.
+474            copy (bool): if set to True a new tree instance is constructed, otherwise the tree is
+475                modified in place.
+476
+477        Returns:
+478            The transformed tree.
+479        """
+480        node = self.copy() if copy else self
+481        new_node = fun(node, *args, **kwargs)
+482
+483        if new_node is None or not isinstance(new_node, Expression):
+484            return new_node
+485        if new_node is not node:
+486            new_node.parent = node.parent
+487            return new_node
+488
+489        replace_children(new_node, lambda child: child.transform(fun, *args, copy=False, **kwargs))
+490        return new_node
+
+ + +

Recursively visits all tree nodes (excluding already transformed ones) +and applies the given transformation function to each node.

+ +
Arguments:
+ +
    +
  • fun (function): a function which takes a node as an argument and returns a +new transformed node or the same node without modifications. If the function +returns None, then the corresponding node will be removed from the syntax tree.
  • +
  • copy (bool): if set to True a new tree instance is constructed, otherwise the tree is +modified in place.
  • +
+ +
Returns:
+ +
+

The transformed tree.

+
+
+ + +
+
+ +
+ + def + replace(self, expression): + + + +
+ +
492    def replace(self, expression):
+493        """
+494        Swap out this expression with a new expression.
+495
+496        For example::
+497
+498            >>> tree = Select().select("x").from_("tbl")
+499            >>> tree.find(Column).replace(Column(this="y"))
+500            (COLUMN this: y)
+501            >>> tree.sql()
+502            'SELECT y FROM tbl'
+503
+504        Args:
+505            expression (Expression|None): new node
+506
+507        Returns:
+508            The new expression or expressions.
+509        """
+510        if not self.parent:
+511            return expression
+512
+513        parent = self.parent
+514        self.parent = None
+515
+516        replace_children(parent, lambda child: expression if child is self else child)
+517        return expression
+
+ + +

Swap out this expression with a new expression.

+ +

For example::

+ +
>>> tree = Select().select("x").from_("tbl")
+>>> tree.find(Column).replace(Column(this="y"))
+(COLUMN this: y)
+>>> tree.sql()
+'SELECT y FROM tbl'
+
+ +
Arguments:
+ +
    +
  • expression (Expression|None): new node
  • +
+ +
Returns:
+ +
+

The new expression or expressions.

+
+
+ + +
+
+ +
+ + def + pop(self): + + + +
+ +
519    def pop(self):
+520        """
+521        Remove this expression from its AST.
+522        """
+523        self.replace(None)
+
+ + +

Remove this expression from its AST.

+
+ + +
+
+ +
+ + def + assert_is(self, type_): + + + +
+ +
525    def assert_is(self, type_):
+526        """
+527        Assert that this `Expression` is an instance of `type_`.
+528
+529        If it is NOT an instance of `type_`, this raises an assertion error.
+530        Otherwise, this returns this expression.
+531
+532        Examples:
+533            This is useful for type security in chained expressions:
+534
+535            >>> import sqlglot
+536            >>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
+537            'SELECT x, z FROM y'
+538        """
+539        assert isinstance(self, type_)
+540        return self
+
+ + +

Assert that this Expression is an instance of type_.

+ +

If it is NOT an instance of type_, this raises an assertion error. +Otherwise, this returns this expression.

+ +
Examples:
+ +
+

This is useful for type security in chained expressions:

+ +
+
>>> import sqlglot
+>>> sqlglot.parse_one("SELECT x from y").assert_is(Select).select("z").sql()
+'SELECT x, z FROM y'
+
+
+
+
+ + +
+
+ +
+ + def + error_messages(self, args: Optional[Sequence] = None) -> List[str]: + + + +
+ +
542    def error_messages(self, args: t.Optional[t.Sequence] = None) -> t.List[str]:
+543        """
+544        Checks if this expression is valid (e.g. all mandatory args are set).
+545
+546        Args:
+547            args: a sequence of values that were used to instantiate a Func expression. This is used
+548                to check that the provided arguments don't exceed the function argument limit.
+549
+550        Returns:
+551            A list of error messages for all possible errors that were found.
+552        """
+553        errors: t.List[str] = []
+554
+555        for k in self.args:
+556            if k not in self.arg_types:
+557                errors.append(f"Unexpected keyword: '{k}' for {self.__class__}")
+558        for k, mandatory in self.arg_types.items():
+559            v = self.args.get(k)
+560            if mandatory and (v is None or (isinstance(v, list) and not v)):
+561                errors.append(f"Required keyword: '{k}' missing for {self.__class__}")
+562
+563        if (
+564            args
+565            and isinstance(self, Func)
+566            and len(args) > len(self.arg_types)
+567            and not self.is_var_len_args
+568        ):
+569            errors.append(
+570                f"The number of provided arguments ({len(args)}) is greater than "
+571                f"the maximum number of supported arguments ({len(self.arg_types)})"
+572            )
+573
+574        return errors
+
+ + +

Checks if this expression is valid (e.g. all mandatory args are set).

+ +
Arguments:
+ +
    +
  • args: a sequence of values that were used to instantiate a Func expression. This is used +to check that the provided arguments don't exceed the function argument limit.
  • +
+ +
Returns:
+ +
+

A list of error messages for all possible errors that were found.

+
+
+ + +
+
+ +
+ + def + dump(self): + + + +
+ +
576    def dump(self):
+577        """
+578        Dump this Expression to a JSON-serializable dict.
+579        """
+580        from sqlglot.serde import dump
+581
+582        return dump(self)
+
+ + +

Dump this Expression to a JSON-serializable dict.

+
+ + +
+
+ +
+
@classmethod
+ + def + load(cls, obj): + + + +
+ +
584    @classmethod
+585    def load(cls, obj):
+586        """
+587        Load a dict (as returned by `Expression.dump`) into an Expression instance.
+588        """
+589        from sqlglot.serde import load
+590
+591        return load(obj)
+
+ + +

Load a dict (as returned by Expression.dump) into an Expression instance.

+
+ + +
+
+
+ +
+ + class + Condition(Expression): + + + +
+ +
601class Condition(Expression):
+602    def and_(self, *expressions, dialect=None, **opts):
+603        """
+604        AND this condition with one or multiple expressions.
+605
+606        Example:
+607            >>> condition("x=1").and_("y=1").sql()
+608            'x = 1 AND y = 1'
+609
+610        Args:
+611            *expressions (str | Expression): the SQL code strings to parse.
+612                If an `Expression` instance is passed, it will be used as-is.
+613            dialect (str): the dialect used to parse the input expression.
+614            opts (kwargs): other options to use to parse the input expressions.
+615
+616        Returns:
+617            And: the new condition.
+618        """
+619        return and_(self, *expressions, dialect=dialect, **opts)
+620
+621    def or_(self, *expressions, dialect=None, **opts):
+622        """
+623        OR this condition with one or multiple expressions.
+624
+625        Example:
+626            >>> condition("x=1").or_("y=1").sql()
+627            'x = 1 OR y = 1'
+628
+629        Args:
+630            *expressions (str | Expression): the SQL code strings to parse.
+631                If an `Expression` instance is passed, it will be used as-is.
+632            dialect (str): the dialect used to parse the input expression.
+633            opts (kwargs): other options to use to parse the input expressions.
+634
+635        Returns:
+636            Or: the new condition.
+637        """
+638        return or_(self, *expressions, dialect=dialect, **opts)
+639
+640    def not_(self):
+641        """
+642        Wrap this condition with NOT.
+643
+644        Example:
+645            >>> condition("x=1").not_().sql()
+646            'NOT x = 1'
+647
+648        Returns:
+649            Not: the new condition.
+650        """
+651        return not_(self)
+
+ + + + +
+ +
+ + def + and_(self, *expressions, dialect=None, **opts): + + + +
+ +
602    def and_(self, *expressions, dialect=None, **opts):
+603        """
+604        AND this condition with one or multiple expressions.
+605
+606        Example:
+607            >>> condition("x=1").and_("y=1").sql()
+608            'x = 1 AND y = 1'
+609
+610        Args:
+611            *expressions (str | Expression): the SQL code strings to parse.
+612                If an `Expression` instance is passed, it will be used as-is.
+613            dialect (str): the dialect used to parse the input expression.
+614            opts (kwargs): other options to use to parse the input expressions.
+615
+616        Returns:
+617            And: the new condition.
+618        """
+619        return and_(self, *expressions, dialect=dialect, **opts)
+
+ + +

AND this condition with one or multiple expressions.

+ +
Example:
+ +
+
+
>>> condition("x=1").and_("y=1").sql()
+'x = 1 AND y = 1'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

And: the new condition.

+
+
+ + +
+
+ +
+ + def + or_(self, *expressions, dialect=None, **opts): + + + +
+ +
621    def or_(self, *expressions, dialect=None, **opts):
+622        """
+623        OR this condition with one or multiple expressions.
+624
+625        Example:
+626            >>> condition("x=1").or_("y=1").sql()
+627            'x = 1 OR y = 1'
+628
+629        Args:
+630            *expressions (str | Expression): the SQL code strings to parse.
+631                If an `Expression` instance is passed, it will be used as-is.
+632            dialect (str): the dialect used to parse the input expression.
+633            opts (kwargs): other options to use to parse the input expressions.
+634
+635        Returns:
+636            Or: the new condition.
+637        """
+638        return or_(self, *expressions, dialect=dialect, **opts)
+
+ + +

OR this condition with one or multiple expressions.

+ +
Example:
+ +
+
+
>>> condition("x=1").or_("y=1").sql()
+'x = 1 OR y = 1'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Or: the new condition.

+
+
+ + +
+
+ +
+ + def + not_(self): + + + +
+ +
640    def not_(self):
+641        """
+642        Wrap this condition with NOT.
+643
+644        Example:
+645            >>> condition("x=1").not_().sql()
+646            'NOT x = 1'
+647
+648        Returns:
+649            Not: the new condition.
+650        """
+651        return not_(self)
+
+ + +

Wrap this condition with NOT.

+ +
Example:
+ +
+
+
>>> condition("x=1").not_().sql()
+'NOT x = 1'
+
+
+
+ +
Returns:
+ +
+

Not: the new condition.

+
+
+ + +
+ +
+
+ +
+ + class + Predicate(Condition): + + + +
+ +
654class Predicate(Condition):
+655    """Relationships like x = y, x > 1, x >= y."""
+
+ + +

Relationships like x = y, x > 1, x >= y.

+
+ + + +
+
+ +
+ + class + DerivedTable(Expression): + + + +
+ +
658class DerivedTable(Expression):
+659    @property
+660    def alias_column_names(self):
+661        table_alias = self.args.get("alias")
+662        if not table_alias:
+663            return []
+664        column_list = table_alias.assert_is(TableAlias).args.get("columns") or []
+665        return [c.name for c in column_list]
+666
+667    @property
+668    def selects(self):
+669        alias = self.args.get("alias")
+670
+671        if alias:
+672            return alias.columns
+673        return []
+674
+675    @property
+676    def named_selects(self):
+677        return [select.output_name for select in self.selects]
+
+ + + + + +
+
+ +
+ + class + Unionable(Expression): + + + +
+ +
680class Unionable(Expression):
+681    def union(self, expression, distinct=True, dialect=None, **opts):
+682        """
+683        Builds a UNION expression.
+684
+685        Example:
+686            >>> import sqlglot
+687            >>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()
+688            'SELECT * FROM foo UNION SELECT * FROM bla'
+689
+690        Args:
+691            expression (str | Expression): the SQL code string.
+692                If an `Expression` instance is passed, it will be used as-is.
+693            distinct (bool): set the DISTINCT flag if and only if this is true.
+694            dialect (str): the dialect used to parse the input expression.
+695            opts (kwargs): other options to use to parse the input expressions.
+696        Returns:
+697            Union: the Union expression.
+698        """
+699        return union(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+700
+701    def intersect(self, expression, distinct=True, dialect=None, **opts):
+702        """
+703        Builds an INTERSECT expression.
+704
+705        Example:
+706            >>> import sqlglot
+707            >>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()
+708            'SELECT * FROM foo INTERSECT SELECT * FROM bla'
+709
+710        Args:
+711            expression (str | Expression): the SQL code string.
+712                If an `Expression` instance is passed, it will be used as-is.
+713            distinct (bool): set the DISTINCT flag if and only if this is true.
+714            dialect (str): the dialect used to parse the input expression.
+715            opts (kwargs): other options to use to parse the input expressions.
+716        Returns:
+717            Intersect: the Intersect expression
+718        """
+719        return intersect(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+720
+721    def except_(self, expression, distinct=True, dialect=None, **opts):
+722        """
+723        Builds an EXCEPT expression.
+724
+725        Example:
+726            >>> import sqlglot
+727            >>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()
+728            'SELECT * FROM foo EXCEPT SELECT * FROM bla'
+729
+730        Args:
+731            expression (str | Expression): the SQL code string.
+732                If an `Expression` instance is passed, it will be used as-is.
+733            distinct (bool): set the DISTINCT flag if and only if this is true.
+734            dialect (str): the dialect used to parse the input expression.
+735            opts (kwargs): other options to use to parse the input expressions.
+736        Returns:
+737            Except: the Except expression
+738        """
+739        return except_(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+
+ + + + +
+ +
+ + def + union(self, expression, distinct=True, dialect=None, **opts): + + + +
+ +
681    def union(self, expression, distinct=True, dialect=None, **opts):
+682        """
+683        Builds a UNION expression.
+684
+685        Example:
+686            >>> import sqlglot
+687            >>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()
+688            'SELECT * FROM foo UNION SELECT * FROM bla'
+689
+690        Args:
+691            expression (str | Expression): the SQL code string.
+692                If an `Expression` instance is passed, it will be used as-is.
+693            distinct (bool): set the DISTINCT flag if and only if this is true.
+694            dialect (str): the dialect used to parse the input expression.
+695            opts (kwargs): other options to use to parse the input expressions.
+696        Returns:
+697            Union: the Union expression.
+698        """
+699        return union(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+
+ + +

Builds a UNION expression.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("SELECT * FROM foo").union("SELECT * FROM bla").sql()
+'SELECT * FROM foo UNION SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code string. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Union: the Union expression.

+
+
+ + +
+
+ +
+ + def + intersect(self, expression, distinct=True, dialect=None, **opts): + + + +
+ +
701    def intersect(self, expression, distinct=True, dialect=None, **opts):
+702        """
+703        Builds an INTERSECT expression.
+704
+705        Example:
+706            >>> import sqlglot
+707            >>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()
+708            'SELECT * FROM foo INTERSECT SELECT * FROM bla'
+709
+710        Args:
+711            expression (str | Expression): the SQL code string.
+712                If an `Expression` instance is passed, it will be used as-is.
+713            distinct (bool): set the DISTINCT flag if and only if this is true.
+714            dialect (str): the dialect used to parse the input expression.
+715            opts (kwargs): other options to use to parse the input expressions.
+716        Returns:
+717            Intersect: the Intersect expression
+718        """
+719        return intersect(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+
+ + +

Builds an INTERSECT expression.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("SELECT * FROM foo").intersect("SELECT * FROM bla").sql()
+'SELECT * FROM foo INTERSECT SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code string. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Intersect: the Intersect expression

+
+
+ + +
+
+ +
+ + def + except_(self, expression, distinct=True, dialect=None, **opts): + + + +
+ +
721    def except_(self, expression, distinct=True, dialect=None, **opts):
+722        """
+723        Builds an EXCEPT expression.
+724
+725        Example:
+726            >>> import sqlglot
+727            >>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()
+728            'SELECT * FROM foo EXCEPT SELECT * FROM bla'
+729
+730        Args:
+731            expression (str | Expression): the SQL code string.
+732                If an `Expression` instance is passed, it will be used as-is.
+733            distinct (bool): set the DISTINCT flag if and only if this is true.
+734            dialect (str): the dialect used to parse the input expression.
+735            opts (kwargs): other options to use to parse the input expressions.
+736        Returns:
+737            Except: the Except expression
+738        """
+739        return except_(left=self, right=expression, distinct=distinct, dialect=dialect, **opts)
+
+ + +

Builds an EXCEPT expression.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("SELECT * FROM foo").except_("SELECT * FROM bla").sql()
+'SELECT * FROM foo EXCEPT SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code string. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Except: the Except expression

+
+
+ + +
+ +
+
+ +
+ + class + UDTF(DerivedTable, Unionable): + + + +
+ +
742class UDTF(DerivedTable, Unionable):
+743    pass
+
+ + + + + +
+
+ +
+ + class + Cache(Expression): + + + +
+ +
746class Cache(Expression):
+747    arg_types = {
+748        "with": False,
+749        "this": True,
+750        "lazy": False,
+751        "options": False,
+752        "expression": False,
+753    }
+
+ + + + + +
+
+ +
+ + class + Uncache(Expression): + + + +
+ +
756class Uncache(Expression):
+757    arg_types = {"this": True, "exists": False}
+
+ + + + + +
+
+ +
+ + class + Create(Expression): + + + +
+ +
760class Create(Expression):
+761    arg_types = {
+762        "with": False,
+763        "this": True,
+764        "kind": True,
+765        "expression": False,
+766        "set": False,
+767        "multiset": False,
+768        "global_temporary": False,
+769        "volatile": False,
+770        "exists": False,
+771        "properties": False,
+772        "temporary": False,
+773        "transient": False,
+774        "external": False,
+775        "replace": False,
+776        "unique": False,
+777        "materialized": False,
+778        "data": False,
+779        "statistics": False,
+780        "no_primary_index": False,
+781        "indexes": False,
+782        "no_schema_binding": False,
+783        "begin": False,
+784    }
+
+ + + + + +
+
+ +
+ + class + Describe(Expression): + + + +
+ +
787class Describe(Expression):
+788    arg_types = {"this": True, "kind": False}
+
+ + + + + +
+
+ +
+ + class + Set(Expression): + + + +
+ +
791class Set(Expression):
+792    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + SetItem(Expression): + + + +
+ +
795class SetItem(Expression):
+796    arg_types = {
+797        "this": False,
+798        "expressions": False,
+799        "kind": False,
+800        "collate": False,  # MySQL SET NAMES statement
+801        "global": False,
+802    }
+
+ + + + + +
+
+ +
+ + class + Show(Expression): + + + +
+ +
805class Show(Expression):
+806    arg_types = {
+807        "this": True,
+808        "target": False,
+809        "offset": False,
+810        "limit": False,
+811        "like": False,
+812        "where": False,
+813        "db": False,
+814        "full": False,
+815        "mutex": False,
+816        "query": False,
+817        "channel": False,
+818        "global": False,
+819        "log": False,
+820        "position": False,
+821        "types": False,
+822    }
+
+ + + + + +
+
+ +
+ + class + UserDefinedFunction(Expression): + + + +
+ +
825class UserDefinedFunction(Expression):
+826    arg_types = {"this": True, "expressions": False, "wrapped": False}
+
+ + + + + +
+
+ +
+ + class + UserDefinedFunctionKwarg(Expression): + + + +
+ +
829class UserDefinedFunctionKwarg(Expression):
+830    arg_types = {"this": True, "kind": True, "default": False}
+
+ + + + + +
+
+ +
+ + class + CharacterSet(Expression): + + + +
+ +
833class CharacterSet(Expression):
+834    arg_types = {"this": True, "default": False}
+
+ + + + + +
+
+ +
+ + class + With(Expression): + + + +
+ +
837class With(Expression):
+838    arg_types = {"expressions": True, "recursive": False}
+839
+840    @property
+841    def recursive(self) -> bool:
+842        return bool(self.args.get("recursive"))
+
+ + + + + +
+
+ +
+ + class + WithinGroup(Expression): + + + +
+ +
845class WithinGroup(Expression):
+846    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + CTE(DerivedTable): + + + +
+ +
849class CTE(DerivedTable):
+850    arg_types = {"this": True, "alias": True}
+
+ + + + + +
+
+ +
+ + class + TableAlias(Expression): + + + +
+ +
853class TableAlias(Expression):
+854    arg_types = {"this": False, "columns": False}
+855
+856    @property
+857    def columns(self):
+858        return self.args.get("columns") or []
+
+ + + + + +
+
+ +
+ + class + BitString(Condition): + + + +
+ +
861class BitString(Condition):
+862    pass
+
+ + + + + +
+
+ +
+ + class + HexString(Condition): + + + +
+ +
865class HexString(Condition):
+866    pass
+
+ + + + + +
+
+ +
+ + class + ByteString(Condition): + + + +
+ +
869class ByteString(Condition):
+870    pass
+
+ + + + + +
+
+ +
+ + class + Column(Condition): + + + +
+ +
873class Column(Condition):
+874    arg_types = {"this": True, "table": False}
+875
+876    @property
+877    def table(self):
+878        return self.text("table")
+879
+880    @property
+881    def output_name(self):
+882        return self.name
+
+ + + + +
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + ColumnDef(Expression): + + + +
+ +
885class ColumnDef(Expression):
+886    arg_types = {
+887        "this": True,
+888        "kind": False,
+889        "constraints": False,
+890        "exists": False,
+891    }
+
+ + + + + +
+
+ +
+ + class + AlterColumn(Expression): + + + +
+ +
894class AlterColumn(Expression):
+895    arg_types = {
+896        "this": True,
+897        "dtype": False,
+898        "collate": False,
+899        "using": False,
+900        "default": False,
+901        "drop": False,
+902    }
+
+ + + + + +
+
+ +
+ + class + RenameTable(Expression): + + + +
+ +
905class RenameTable(Expression):
+906    pass
+
+ + + + + +
+
+ +
+ + class + ColumnConstraint(Expression): + + + +
+ +
909class ColumnConstraint(Expression):
+910    arg_types = {"this": False, "kind": True}
+
+ + + + + +
+
+ +
+ + class + ColumnConstraintKind(Expression): + + + +
+ +
913class ColumnConstraintKind(Expression):
+914    pass
+
+ + + + + +
+
+ +
+ + class + AutoIncrementColumnConstraint(ColumnConstraintKind): + + + +
+ +
917class AutoIncrementColumnConstraint(ColumnConstraintKind):
+918    pass
+
+ + + + + +
+
+ +
+ + class + CheckColumnConstraint(ColumnConstraintKind): + + + +
+ +
921class CheckColumnConstraint(ColumnConstraintKind):
+922    pass
+
+ + + + + +
+
+ +
+ + class + CollateColumnConstraint(ColumnConstraintKind): + + + +
+ +
925class CollateColumnConstraint(ColumnConstraintKind):
+926    pass
+
+ + + + + +
+
+ +
+ + class + CommentColumnConstraint(ColumnConstraintKind): + + + +
+ +
929class CommentColumnConstraint(ColumnConstraintKind):
+930    pass
+
+ + + + + +
+
+ +
+ + class + DefaultColumnConstraint(ColumnConstraintKind): + + + +
+ +
933class DefaultColumnConstraint(ColumnConstraintKind):
+934    pass
+
+ + + + + +
+
+ +
+ + class + EncodeColumnConstraint(ColumnConstraintKind): + + + +
+ +
937class EncodeColumnConstraint(ColumnConstraintKind):
+938    pass
+
+ + + + + +
+
+ +
+ + class + GeneratedAsIdentityColumnConstraint(ColumnConstraintKind): + + + +
+ +
941class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
+942    # this: True -> ALWAYS, this: False -> BY DEFAULT
+943    arg_types = {"this": False, "start": False, "increment": False}
+
+ + + + + +
+
+ +
+ + class + NotNullColumnConstraint(ColumnConstraintKind): + + + +
+ +
946class NotNullColumnConstraint(ColumnConstraintKind):
+947    arg_types = {"allow_null": False}
+
+ + + + + +
+
+ +
+ + class + PrimaryKeyColumnConstraint(ColumnConstraintKind): + + + +
+ +
950class PrimaryKeyColumnConstraint(ColumnConstraintKind):
+951    arg_types = {"desc": False}
+
+ + + + + +
+
+ +
+ + class + UniqueColumnConstraint(ColumnConstraintKind): + + + +
+ +
954class UniqueColumnConstraint(ColumnConstraintKind):
+955    pass
+
+ + + + + +
+
+ +
+ + class + Constraint(Expression): + + + +
+ +
958class Constraint(Expression):
+959    arg_types = {"this": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + Delete(Expression): + + + +
+ +
962class Delete(Expression):
+963    arg_types = {"with": False, "this": False, "using": False, "where": False}
+
+ + + + + +
+
+ +
+ + class + Drop(Expression): + + + +
+ +
966class Drop(Expression):
+967    arg_types = {
+968        "this": False,
+969        "kind": False,
+970        "exists": False,
+971        "temporary": False,
+972        "materialized": False,
+973        "cascade": False,
+974    }
+
+ + + + + +
+
+ +
+ + class + Filter(Expression): + + + +
+ +
977class Filter(Expression):
+978    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + Check(Expression): + + + +
+ +
981class Check(Expression):
+982    pass
+
+ + + + + +
+
+ +
+ + class + Directory(Expression): + + + +
+ +
985class Directory(Expression):
+986    # https://spark.apache.org/docs/3.0.0-preview/sql-ref-syntax-dml-insert-overwrite-directory-hive.html
+987    arg_types = {"this": True, "local": False, "row_format": False}
+
+ + + + + +
+
+ +
+ + class + ForeignKey(Expression): + + + +
+ +
990class ForeignKey(Expression):
+991    arg_types = {
+992        "expressions": True,
+993        "reference": False,
+994        "delete": False,
+995        "update": False,
+996    }
+
+ + + + + +
+
+ +
+ + class + PrimaryKey(Expression): + + + +
+ +
 999class PrimaryKey(Expression):
+1000    arg_types = {"expressions": True, "options": False}
+
+ + + + + +
+
+ +
+ + class + Unique(Expression): + + + +
+ +
1003class Unique(Expression):
+1004    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + Into(Expression): + + + +
+ +
1009class Into(Expression):
+1010    arg_types = {"this": True, "temporary": False, "unlogged": False}
+
+ + + + + +
+
+ +
+ + class + From(Expression): + + + +
+ +
1013class From(Expression):
+1014    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + Having(Expression): + + + +
+ +
1017class Having(Expression):
+1018    pass
+
+ + + + + +
+
+ +
+ + class + Hint(Expression): + + + +
+ +
1021class Hint(Expression):
+1022    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + JoinHint(Expression): + + + +
+ +
1025class JoinHint(Expression):
+1026    arg_types = {"this": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + Identifier(Expression): + + + +
+ +
1029class Identifier(Expression):
+1030    arg_types = {"this": True, "quoted": False}
+1031
+1032    @property
+1033    def quoted(self):
+1034        return bool(self.args.get("quoted"))
+1035
+1036    def __eq__(self, other):
+1037        return isinstance(other, self.__class__) and _norm_arg(self.this) == _norm_arg(other.this)
+1038
+1039    def __hash__(self):
+1040        return hash((self.key, self.this.lower()))
+1041
+1042    @property
+1043    def output_name(self):
+1044        return self.name
+
+ + + + +
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + Index(Expression): + + + +
+ +
1047class Index(Expression):
+1048    arg_types = {
+1049        "this": False,
+1050        "table": False,
+1051        "where": False,
+1052        "columns": False,
+1053        "unique": False,
+1054        "primary": False,
+1055        "amp": False,  # teradata
+1056    }
+
+ + + + + +
+
+ +
+ + class + Insert(Expression): + + + +
+ +
1059class Insert(Expression):
+1060    arg_types = {
+1061        "with": False,
+1062        "this": True,
+1063        "expression": False,
+1064        "overwrite": False,
+1065        "exists": False,
+1066        "partition": False,
+1067    }
+
+ + + + + +
+
+ +
+ + class + Introducer(Expression): + + + +
+ +
1071class Introducer(Expression):
+1072    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + National(Expression): + + + +
+ +
1076class National(Expression):
+1077    pass
+
+ + + + + +
+
+ +
+ + class + LoadData(Expression): + + + +
+ +
1080class LoadData(Expression):
+1081    arg_types = {
+1082        "this": True,
+1083        "local": False,
+1084        "overwrite": False,
+1085        "inpath": True,
+1086        "partition": False,
+1087        "input_format": False,
+1088        "serde": False,
+1089    }
+
+ + + + + +
+
+ +
+ + class + Partition(Expression): + + + +
+ +
1092class Partition(Expression):
+1093    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + Fetch(Expression): + + + +
+ +
1096class Fetch(Expression):
+1097    arg_types = {"direction": False, "count": False}
+
+ + + + + +
+
+ +
+ + class + Group(Expression): + + + +
+ +
1100class Group(Expression):
+1101    arg_types = {
+1102        "expressions": False,
+1103        "grouping_sets": False,
+1104        "cube": False,
+1105        "rollup": False,
+1106    }
+
+ + + + + +
+
+ +
+ + class + Lambda(Expression): + + + +
+ +
1109class Lambda(Expression):
+1110    arg_types = {"this": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + Limit(Expression): + + + +
+ +
1113class Limit(Expression):
+1114    arg_types = {"this": False, "expression": True}
+
+ + + + + +
+
+ +
+ + class + Literal(Condition): + + + +
+ +
1117class Literal(Condition):
+1118    arg_types = {"this": True, "is_string": True}
+1119
+1120    def __eq__(self, other):
+1121        return (
+1122            isinstance(other, Literal)
+1123            and self.this == other.this
+1124            and self.args["is_string"] == other.args["is_string"]
+1125        )
+1126
+1127    def __hash__(self):
+1128        return hash((self.key, self.this, self.args["is_string"]))
+1129
+1130    @classmethod
+1131    def number(cls, number) -> Literal:
+1132        return cls(this=str(number), is_string=False)
+1133
+1134    @classmethod
+1135    def string(cls, string) -> Literal:
+1136        return cls(this=str(string), is_string=True)
+1137
+1138    @property
+1139    def output_name(self):
+1140        return self.name
+
+ + + + +
+ +
+
@classmethod
+ + def + number(cls, number) -> sqlglot.expressions.Literal: + + + +
+ +
1130    @classmethod
+1131    def number(cls, number) -> Literal:
+1132        return cls(this=str(number), is_string=False)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + string(cls, string) -> sqlglot.expressions.Literal: + + + +
+ +
1134    @classmethod
+1135    def string(cls, string) -> Literal:
+1136        return cls(this=str(string), is_string=True)
+
+ + + + +
+
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + Join(Expression): + + + +
+ +
1143class Join(Expression):
+1144    arg_types = {
+1145        "this": True,
+1146        "on": False,
+1147        "side": False,
+1148        "kind": False,
+1149        "using": False,
+1150        "natural": False,
+1151    }
+1152
+1153    @property
+1154    def kind(self):
+1155        return self.text("kind").upper()
+1156
+1157    @property
+1158    def side(self):
+1159        return self.text("side").upper()
+1160
+1161    @property
+1162    def alias_or_name(self):
+1163        return self.this.alias_or_name
+1164
+1165    def on(self, *expressions, append=True, dialect=None, copy=True, **opts):
+1166        """
+1167        Append to or set the ON expressions.
+1168
+1169        Example:
+1170            >>> import sqlglot
+1171            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
+1172            'JOIN x ON y = 1'
+1173
+1174        Args:
+1175            *expressions (str | Expression): the SQL code strings to parse.
+1176                If an `Expression` instance is passed, it will be used as-is.
+1177                Multiple expressions are combined with an AND operator.
+1178            append (bool): if `True`, AND the new expressions to any existing expression.
+1179                Otherwise, this resets the expression.
+1180            dialect (str): the dialect used to parse the input expressions.
+1181            copy (bool): if `False`, modify this expression instance in-place.
+1182            opts (kwargs): other options to use to parse the input expressions.
+1183
+1184        Returns:
+1185            Join: the modified join expression.
+1186        """
+1187        join = _apply_conjunction_builder(
+1188            *expressions,
+1189            instance=self,
+1190            arg="on",
+1191            append=append,
+1192            dialect=dialect,
+1193            copy=copy,
+1194            **opts,
+1195        )
+1196
+1197        if join.kind == "CROSS":
+1198            join.set("kind", None)
+1199
+1200        return join
+1201
+1202    def using(self, *expressions, append=True, dialect=None, copy=True, **opts):
+1203        """
+1204        Append to or set the USING expressions.
+1205
+1206        Example:
+1207            >>> import sqlglot
+1208            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
+1209            'JOIN x USING (foo, bla)'
+1210
+1211        Args:
+1212            *expressions (str | Expression): the SQL code strings to parse.
+1213                If an `Expression` instance is passed, it will be used as-is.
+1214            append (bool): if `True`, concatenate the new expressions to the existing "using" list.
+1215                Otherwise, this resets the expression.
+1216            dialect (str): the dialect used to parse the input expressions.
+1217            copy (bool): if `False`, modify this expression instance in-place.
+1218            opts (kwargs): other options to use to parse the input expressions.
+1219
+1220        Returns:
+1221            Join: the modified join expression.
+1222        """
+1223        join = _apply_list_builder(
+1224            *expressions,
+1225            instance=self,
+1226            arg="using",
+1227            append=append,
+1228            dialect=dialect,
+1229            copy=copy,
+1230            **opts,
+1231        )
+1232
+1233        if join.kind == "CROSS":
+1234            join.set("kind", None)
+1235
+1236        return join
+
+ + + + +
+ +
+ + def + on(self, *expressions, append=True, dialect=None, copy=True, **opts): + + + +
+ +
1165    def on(self, *expressions, append=True, dialect=None, copy=True, **opts):
+1166        """
+1167        Append to or set the ON expressions.
+1168
+1169        Example:
+1170            >>> import sqlglot
+1171            >>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
+1172            'JOIN x ON y = 1'
+1173
+1174        Args:
+1175            *expressions (str | Expression): the SQL code strings to parse.
+1176                If an `Expression` instance is passed, it will be used as-is.
+1177                Multiple expressions are combined with an AND operator.
+1178            append (bool): if `True`, AND the new expressions to any existing expression.
+1179                Otherwise, this resets the expression.
+1180            dialect (str): the dialect used to parse the input expressions.
+1181            copy (bool): if `False`, modify this expression instance in-place.
+1182            opts (kwargs): other options to use to parse the input expressions.
+1183
+1184        Returns:
+1185            Join: the modified join expression.
+1186        """
+1187        join = _apply_conjunction_builder(
+1188            *expressions,
+1189            instance=self,
+1190            arg="on",
+1191            append=append,
+1192            dialect=dialect,
+1193            copy=copy,
+1194            **opts,
+1195        )
+1196
+1197        if join.kind == "CROSS":
+1198            join.set("kind", None)
+1199
+1200        return join
+
+ + +

Append to or set the ON expressions.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("JOIN x", into=Join).on("y = 1").sql()
+'JOIN x ON y = 1'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is. +Multiple expressions are combined with an AND operator.
  • +
  • append (bool): if True, AND the new expressions to any existing expression. +Otherwise, this resets the expression.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Join: the modified join expression.

+
+
+ + +
+
+ +
+ + def + using(self, *expressions, append=True, dialect=None, copy=True, **opts): + + + +
+ +
1202    def using(self, *expressions, append=True, dialect=None, copy=True, **opts):
+1203        """
+1204        Append to or set the USING expressions.
+1205
+1206        Example:
+1207            >>> import sqlglot
+1208            >>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
+1209            'JOIN x USING (foo, bla)'
+1210
+1211        Args:
+1212            *expressions (str | Expression): the SQL code strings to parse.
+1213                If an `Expression` instance is passed, it will be used as-is.
+1214            append (bool): if `True`, concatenate the new expressions to the existing "using" list.
+1215                Otherwise, this resets the expression.
+1216            dialect (str): the dialect used to parse the input expressions.
+1217            copy (bool): if `False`, modify this expression instance in-place.
+1218            opts (kwargs): other options to use to parse the input expressions.
+1219
+1220        Returns:
+1221            Join: the modified join expression.
+1222        """
+1223        join = _apply_list_builder(
+1224            *expressions,
+1225            instance=self,
+1226            arg="using",
+1227            append=append,
+1228            dialect=dialect,
+1229            copy=copy,
+1230            **opts,
+1231        )
+1232
+1233        if join.kind == "CROSS":
+1234            join.set("kind", None)
+1235
+1236        return join
+
+ + +

Append to or set the USING expressions.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("JOIN x", into=Join).using("foo", "bla").sql()
+'JOIN x USING (foo, bla)'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • append (bool): if True, concatenate the new expressions to the existing "using" list. +Otherwise, this resets the expression.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Join: the modified join expression.

+
+
+ + +
+ +
+
+ +
+ + class + Lateral(UDTF): + + + +
+ +
1239class Lateral(UDTF):
+1240    arg_types = {"this": True, "view": False, "outer": False, "alias": False}
+
+ + + + + +
+
+ +
+ + class + MatchRecognize(Expression): + + + +
+ +
1243class MatchRecognize(Expression):
+1244    arg_types = {
+1245        "partition_by": False,
+1246        "order": False,
+1247        "measures": False,
+1248        "rows": False,
+1249        "after": False,
+1250        "pattern": False,
+1251        "define": False,
+1252    }
+
+ + + + + +
+
+ +
+ + class + Final(Expression): + + + +
+ +
1257class Final(Expression):
+1258    pass
+
+ + + + + +
+
+ +
+ + class + Offset(Expression): + + + +
+ +
1261class Offset(Expression):
+1262    arg_types = {"this": False, "expression": True}
+
+ + + + + +
+
+ +
+ + class + Order(Expression): + + + +
+ +
1265class Order(Expression):
+1266    arg_types = {"this": False, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + Cluster(Order): + + + +
+ +
1271class Cluster(Order):
+1272    pass
+
+ + + + + +
+
+ +
+ + class + Distribute(Order): + + + +
+ +
1275class Distribute(Order):
+1276    pass
+
+ + + + + +
+
+ +
+ + class + Sort(Order): + + + +
+ +
1279class Sort(Order):
+1280    pass
+
+ + + + + +
+
+ +
+ + class + Ordered(Expression): + + + +
+ +
1283class Ordered(Expression):
+1284    arg_types = {"this": True, "desc": True, "nulls_first": True}
+
+ + + + + +
+
+ +
+ + class + Property(Expression): + + + +
+ +
1287class Property(Expression):
+1288    arg_types = {"this": True, "value": True}
+
+ + + + + +
+
+ +
+ + class + AlgorithmProperty(Property): + + + +
+ +
1291class AlgorithmProperty(Property):
+1292    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + DefinerProperty(Property): + + + +
+ +
1295class DefinerProperty(Property):
+1296    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + SqlSecurityProperty(Property): + + + +
+ +
1299class SqlSecurityProperty(Property):
+1300    arg_types = {"definer": True}
+
+ + + + + +
+
+ +
+ + class + TableFormatProperty(Property): + + + +
+ +
1303class TableFormatProperty(Property):
+1304    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + PartitionedByProperty(Property): + + + +
+ +
1307class PartitionedByProperty(Property):
+1308    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + FileFormatProperty(Property): + + + +
+ +
1311class FileFormatProperty(Property):
+1312    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + DistKeyProperty(Property): + + + +
+ +
1315class DistKeyProperty(Property):
+1316    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + SortKeyProperty(Property): + + + +
+ +
1319class SortKeyProperty(Property):
+1320    arg_types = {"this": True, "compound": False}
+
+ + + + + +
+
+ +
+ + class + DistStyleProperty(Property): + + + +
+ +
1323class DistStyleProperty(Property):
+1324    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + LikeProperty(Property): + + + +
+ +
1327class LikeProperty(Property):
+1328    arg_types = {"this": True, "expressions": False}
+
+ + + + + +
+
+ +
+ + class + LocationProperty(Property): + + + +
+ +
1331class LocationProperty(Property):
+1332    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + EngineProperty(Property): + + + +
+ +
1335class EngineProperty(Property):
+1336    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + AutoIncrementProperty(Property): + + + +
+ +
1339class AutoIncrementProperty(Property):
+1340    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + CharacterSetProperty(Property): + + + +
+ +
1343class CharacterSetProperty(Property):
+1344    arg_types = {"this": True, "default": True}
+
+ + + + + +
+
+ +
+ + class + CollateProperty(Property): + + + +
+ +
1347class CollateProperty(Property):
+1348    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + SchemaCommentProperty(Property): + + + +
+ +
1351class SchemaCommentProperty(Property):
+1352    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + ReturnsProperty(Property): + + + +
+ +
1355class ReturnsProperty(Property):
+1356    arg_types = {"this": True, "is_table": False, "table": False}
+
+ + + + + +
+
+ +
+ + class + LanguageProperty(Property): + + + +
+ +
1359class LanguageProperty(Property):
+1360    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + ExecuteAsProperty(Property): + + + +
+ +
1363class ExecuteAsProperty(Property):
+1364    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + VolatilityProperty(Property): + + + +
+ +
1367class VolatilityProperty(Property):
+1368    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + RowFormatDelimitedProperty(Property): + + + +
+ +
1371class RowFormatDelimitedProperty(Property):
+1372    # https://cwiki.apache.org/confluence/display/hive/languagemanual+dml
+1373    arg_types = {
+1374        "fields": False,
+1375        "escaped": False,
+1376        "collection_items": False,
+1377        "map_keys": False,
+1378        "lines": False,
+1379        "null": False,
+1380        "serde": False,
+1381    }
+
+ + + + + +
+
+ +
+ + class + RowFormatSerdeProperty(Property): + + + +
+ +
1384class RowFormatSerdeProperty(Property):
+1385    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + SerdeProperties(Property): + + + +
+ +
1388class SerdeProperties(Property):
+1389    arg_types = {"expressions": True}
+
+ + + + + +
+
+ +
+ + class + FallbackProperty(Property): + + + +
+ +
1392class FallbackProperty(Property):
+1393    arg_types = {"no": True, "protection": False}
+
+ + + + + +
+
+ +
+ + class + WithJournalTableProperty(Property): + + + +
+ +
1396class WithJournalTableProperty(Property):
+1397    arg_types = {"this": True}
+
+ + + + + +
+
+ +
+ + class + LogProperty(Property): + + + +
+ +
1400class LogProperty(Property):
+1401    arg_types = {"no": True}
+
+ + + + + +
+
+ +
+ + class + JournalProperty(Property): + + + +
+ +
1404class JournalProperty(Property):
+1405    arg_types = {"no": True, "dual": False, "before": False}
+
+ + + + + +
+
+ +
+ + class + AfterJournalProperty(Property): + + + +
+ +
1408class AfterJournalProperty(Property):
+1409    arg_types = {"no": True, "dual": False, "local": False}
+
+ + + + + +
+
+ +
+ + class + ChecksumProperty(Property): + + + +
+ +
1412class ChecksumProperty(Property):
+1413    arg_types = {"on": False, "default": False}
+
+ + + + + +
+
+ +
+ + class + FreespaceProperty(Property): + + + +
+ +
1416class FreespaceProperty(Property):
+1417    arg_types = {"this": True, "percent": False}
+
+ + + + + +
+
+ +
+ + class + MergeBlockRatioProperty(Property): + + + +
+ +
1420class MergeBlockRatioProperty(Property):
+1421    arg_types = {"this": False, "no": False, "default": False, "percent": False}
+
+ + + + + +
+
+ +
+ + class + DataBlocksizeProperty(Property): + + + +
+ +
1424class DataBlocksizeProperty(Property):
+1425    arg_types = {"size": False, "units": False, "min": False, "default": False}
+
+ + + + + +
+
+ +
+ + class + BlockCompressionProperty(Property): + + + +
+ +
1428class BlockCompressionProperty(Property):
+1429    arg_types = {"autotemp": False, "always": False, "default": True, "manual": True, "never": True}
+
+ + + + + +
+
+ +
+ + class + IsolatedLoadingProperty(Property): + + + +
+ +
1432class IsolatedLoadingProperty(Property):
+1433    arg_types = {
+1434        "no": True,
+1435        "concurrent": True,
+1436        "for_all": True,
+1437        "for_insert": True,
+1438        "for_none": True,
+1439    }
+
+ + + + + +
+
+ +
+ + class + Properties(Expression): + + + +
+ +
1442class Properties(Expression):
+1443    arg_types = {"expressions": True}
+1444
+1445    NAME_TO_PROPERTY = {
+1446        "ALGORITHM": AlgorithmProperty,
+1447        "AUTO_INCREMENT": AutoIncrementProperty,
+1448        "CHARACTER SET": CharacterSetProperty,
+1449        "COLLATE": CollateProperty,
+1450        "COMMENT": SchemaCommentProperty,
+1451        "DEFINER": DefinerProperty,
+1452        "DISTKEY": DistKeyProperty,
+1453        "DISTSTYLE": DistStyleProperty,
+1454        "ENGINE": EngineProperty,
+1455        "EXECUTE AS": ExecuteAsProperty,
+1456        "FORMAT": FileFormatProperty,
+1457        "LANGUAGE": LanguageProperty,
+1458        "LOCATION": LocationProperty,
+1459        "PARTITIONED_BY": PartitionedByProperty,
+1460        "RETURNS": ReturnsProperty,
+1461        "SORTKEY": SortKeyProperty,
+1462        "TABLE_FORMAT": TableFormatProperty,
+1463    }
+1464
+1465    PROPERTY_TO_NAME = {v: k for k, v in NAME_TO_PROPERTY.items()}
+1466
+1467    class Location(AutoName):
+1468        POST_CREATE = auto()
+1469        PRE_SCHEMA = auto()
+1470        POST_INDEX = auto()
+1471        POST_SCHEMA_ROOT = auto()
+1472        POST_SCHEMA_WITH = auto()
+1473        UNSUPPORTED = auto()
+1474
+1475    @classmethod
+1476    def from_dict(cls, properties_dict) -> Properties:
+1477        expressions = []
+1478        for key, value in properties_dict.items():
+1479            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
+1480            if property_cls:
+1481                expressions.append(property_cls(this=convert(value)))
+1482            else:
+1483                expressions.append(Property(this=Literal.string(key), value=convert(value)))
+1484
+1485        return cls(expressions=expressions)
+
+ + + + +
+ +
+
@classmethod
+ + def + from_dict(cls, properties_dict) -> sqlglot.expressions.Properties: + + + +
+ +
1475    @classmethod
+1476    def from_dict(cls, properties_dict) -> Properties:
+1477        expressions = []
+1478        for key, value in properties_dict.items():
+1479            property_cls = cls.NAME_TO_PROPERTY.get(key.upper())
+1480            if property_cls:
+1481                expressions.append(property_cls(this=convert(value)))
+1482            else:
+1483                expressions.append(Property(this=Literal.string(key), value=convert(value)))
+1484
+1485        return cls(expressions=expressions)
+
+ + + + +
+ +
+
+ +
+ + class + Properties.Location(sqlglot.helper.AutoName): + + + +
+ +
1467    class Location(AutoName):
+1468        POST_CREATE = auto()
+1469        PRE_SCHEMA = auto()
+1470        POST_INDEX = auto()
+1471        POST_SCHEMA_ROOT = auto()
+1472        POST_SCHEMA_WITH = auto()
+1473        UNSUPPORTED = auto()
+
+ + +

An enumeration.

+
+ + +
+
+ POST_CREATE = <Location.POST_CREATE: 'POST_CREATE'> + + +
+ + + + +
+
+
+ PRE_SCHEMA = <Location.PRE_SCHEMA: 'PRE_SCHEMA'> + + +
+ + + + +
+
+
+ POST_INDEX = <Location.POST_INDEX: 'POST_INDEX'> + + +
+ + + + +
+
+
+ POST_SCHEMA_ROOT = <Location.POST_SCHEMA_ROOT: 'POST_SCHEMA_ROOT'> + + +
+ + + + +
+
+
+ POST_SCHEMA_WITH = <Location.POST_SCHEMA_WITH: 'POST_SCHEMA_WITH'> + + +
+ + + + +
+
+
+ UNSUPPORTED = <Location.UNSUPPORTED: 'UNSUPPORTED'> + + +
+ + + + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + class + Qualify(Expression): + + + +
+ +
1488class Qualify(Expression):
+1489    pass
+
+ + + + + +
+
+ +
+ + class + Return(Expression): + + + +
+ +
1493class Return(Expression):
+1494    pass
+
+ + + + + +
+
+ +
+ + class + Reference(Expression): + + + +
+ +
1497class Reference(Expression):
+1498    arg_types = {"this": True, "expressions": False, "options": False}
+
+ + + + + +
+
+ +
+ + class + Tuple(Expression): + + + +
+ +
1501class Tuple(Expression):
+1502    arg_types = {"expressions": False}
+
+ + + + + +
+
+ +
+ + class + Subqueryable(Unionable): + + + +
+ +
1505class Subqueryable(Unionable):
+1506    def subquery(self, alias=None, copy=True) -> Subquery:
+1507        """
+1508        Convert this expression to an aliased expression that can be used as a Subquery.
+1509
+1510        Example:
+1511            >>> subquery = Select().select("x").from_("tbl").subquery()
+1512            >>> Select().select("x").from_(subquery).sql()
+1513            'SELECT x FROM (SELECT x FROM tbl)'
+1514
+1515        Args:
+1516            alias (str | Identifier): an optional alias for the subquery
+1517            copy (bool): if `False`, modify this expression instance in-place.
+1518
+1519        Returns:
+1520            Alias: the subquery
+1521        """
+1522        instance = _maybe_copy(self, copy)
+1523        return Subquery(
+1524            this=instance,
+1525            alias=TableAlias(this=to_identifier(alias)),
+1526        )
+1527
+1528    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1529        raise NotImplementedError
+1530
+1531    @property
+1532    def ctes(self):
+1533        with_ = self.args.get("with")
+1534        if not with_:
+1535            return []
+1536        return with_.expressions
+1537
+1538    @property
+1539    def selects(self):
+1540        raise NotImplementedError("Subqueryable objects must implement `selects`")
+1541
+1542    @property
+1543    def named_selects(self):
+1544        raise NotImplementedError("Subqueryable objects must implement `named_selects`")
+1545
+1546    def with_(
+1547        self,
+1548        alias,
+1549        as_,
+1550        recursive=None,
+1551        append=True,
+1552        dialect=None,
+1553        copy=True,
+1554        **opts,
+1555    ):
+1556        """
+1557        Append to or set the common table expressions.
+1558
+1559        Example:
+1560            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
+1561            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
+1562
+1563        Args:
+1564            alias (str | Expression): the SQL code string to parse as the table name.
+1565                If an `Expression` instance is passed, this is used as-is.
+1566            as_ (str | Expression): the SQL code string to parse as the table expression.
+1567                If an `Expression` instance is passed, it will be used as-is.
+1568            recursive (bool): set the RECURSIVE part of the expression. Defaults to `False`.
+1569            append (bool): if `True`, add to any existing expressions.
+1570                Otherwise, this resets the expressions.
+1571            dialect (str): the dialect used to parse the input expression.
+1572            copy (bool): if `False`, modify this expression instance in-place.
+1573            opts (kwargs): other options to use to parse the input expressions.
+1574
+1575        Returns:
+1576            Select: the modified expression.
+1577        """
+1578        alias_expression = maybe_parse(
+1579            alias,
+1580            dialect=dialect,
+1581            into=TableAlias,
+1582            **opts,
+1583        )
+1584        as_expression = maybe_parse(
+1585            as_,
+1586            dialect=dialect,
+1587            **opts,
+1588        )
+1589        cte = CTE(
+1590            this=as_expression,
+1591            alias=alias_expression,
+1592        )
+1593        return _apply_child_list_builder(
+1594            cte,
+1595            instance=self,
+1596            arg="with",
+1597            append=append,
+1598            copy=copy,
+1599            into=With,
+1600            properties={"recursive": recursive or False},
+1601        )
+
+ + + + +
+ +
+ + def + subquery(self, alias=None, copy=True) -> sqlglot.expressions.Subquery: + + + +
+ +
1506    def subquery(self, alias=None, copy=True) -> Subquery:
+1507        """
+1508        Convert this expression to an aliased expression that can be used as a Subquery.
+1509
+1510        Example:
+1511            >>> subquery = Select().select("x").from_("tbl").subquery()
+1512            >>> Select().select("x").from_(subquery).sql()
+1513            'SELECT x FROM (SELECT x FROM tbl)'
+1514
+1515        Args:
+1516            alias (str | Identifier): an optional alias for the subquery
+1517            copy (bool): if `False`, modify this expression instance in-place.
+1518
+1519        Returns:
+1520            Alias: the subquery
+1521        """
+1522        instance = _maybe_copy(self, copy)
+1523        return Subquery(
+1524            this=instance,
+1525            alias=TableAlias(this=to_identifier(alias)),
+1526        )
+
+ + +

Convert this expression to an aliased expression that can be used as a Subquery.

+ +
Example:
+ +
+
+
>>> subquery = Select().select("x").from_("tbl").subquery()
+>>> Select().select("x").from_(subquery).sql()
+'SELECT x FROM (SELECT x FROM tbl)'
+
+
+
+ +
Arguments:
+ +
    +
  • alias (str | Identifier): an optional alias for the subquery
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
+ +
Returns:
+ +
+

Alias: the subquery

+
+
+ + +
+
+ +
+ + def + limit( self, expression, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1528    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1529        raise NotImplementedError
+
+ + + + +
+
+ +
+ + def + with_( self, alias, as_, recursive=None, append=True, dialect=None, copy=True, **opts): + + + +
+ +
1546    def with_(
+1547        self,
+1548        alias,
+1549        as_,
+1550        recursive=None,
+1551        append=True,
+1552        dialect=None,
+1553        copy=True,
+1554        **opts,
+1555    ):
+1556        """
+1557        Append to or set the common table expressions.
+1558
+1559        Example:
+1560            >>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
+1561            'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
+1562
+1563        Args:
+1564            alias (str | Expression): the SQL code string to parse as the table name.
+1565                If an `Expression` instance is passed, this is used as-is.
+1566            as_ (str | Expression): the SQL code string to parse as the table expression.
+1567                If an `Expression` instance is passed, it will be used as-is.
+1568            recursive (bool): set the RECURSIVE part of the expression. Defaults to `False`.
+1569            append (bool): if `True`, add to any existing expressions.
+1570                Otherwise, this resets the expressions.
+1571            dialect (str): the dialect used to parse the input expression.
+1572            copy (bool): if `False`, modify this expression instance in-place.
+1573            opts (kwargs): other options to use to parse the input expressions.
+1574
+1575        Returns:
+1576            Select: the modified expression.
+1577        """
+1578        alias_expression = maybe_parse(
+1579            alias,
+1580            dialect=dialect,
+1581            into=TableAlias,
+1582            **opts,
+1583        )
+1584        as_expression = maybe_parse(
+1585            as_,
+1586            dialect=dialect,
+1587            **opts,
+1588        )
+1589        cte = CTE(
+1590            this=as_expression,
+1591            alias=alias_expression,
+1592        )
+1593        return _apply_child_list_builder(
+1594            cte,
+1595            instance=self,
+1596            arg="with",
+1597            append=append,
+1598            copy=copy,
+1599            into=With,
+1600            properties={"recursive": recursive or False},
+1601        )
+
+ + +

Append to or set the common table expressions.

+ +
Example:
+ +
+
+
>>> Select().with_("tbl2", as_="SELECT * FROM tbl").select("x").from_("tbl2").sql()
+'WITH tbl2 AS (SELECT * FROM tbl) SELECT x FROM tbl2'
+
+
+
+ +
Arguments:
+ +
    +
  • alias (str | Expression): the SQL code string to parse as the table name. +If an Expression instance is passed, this is used as-is.
  • +
  • as_ (str | Expression): the SQL code string to parse as the table expression. +If an Expression instance is passed, it will be used as-is.
  • +
  • recursive (bool): set the RECURSIVE part of the expression. Defaults to False.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this resets the expressions.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+ +
+
+ +
+ + class + Table(Expression): + + + +
+ +
1624class Table(Expression):
+1625    arg_types = {
+1626        "this": True,
+1627        "alias": False,
+1628        "db": False,
+1629        "catalog": False,
+1630        "laterals": False,
+1631        "joins": False,
+1632        "pivots": False,
+1633        "hints": False,
+1634        "system_time": False,
+1635    }
+
+ + + + + +
+
+ +
+ + class + SystemTime(Expression): + + + +
+ +
1639class SystemTime(Expression):
+1640    arg_types = {
+1641        "this": False,
+1642        "expression": False,
+1643        "kind": True,
+1644    }
+
+ + + + + +
+
+ +
+ + class + Union(Subqueryable): + + + +
+ +
1647class Union(Subqueryable):
+1648    arg_types = {
+1649        "with": False,
+1650        "this": True,
+1651        "expression": True,
+1652        "distinct": False,
+1653        **QUERY_MODIFIERS,
+1654    }
+1655
+1656    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1657        """
+1658        Set the LIMIT expression.
+1659
+1660        Example:
+1661            >>> select("1").union(select("1")).limit(1).sql()
+1662            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
+1663
+1664        Args:
+1665            expression (str | int | Expression): the SQL code string to parse.
+1666                This can also be an integer.
+1667                If a `Limit` instance is passed, this is used as-is.
+1668                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
+1669            dialect (str): the dialect used to parse the input expression.
+1670            copy (bool): if `False`, modify this expression instance in-place.
+1671            opts (kwargs): other options to use to parse the input expressions.
+1672
+1673        Returns:
+1674            Select: The limited subqueryable.
+1675        """
+1676        return (
+1677            select("*")
+1678            .from_(self.subquery(alias="_l_0", copy=copy))
+1679            .limit(expression, dialect=dialect, copy=False, **opts)
+1680        )
+1681
+1682    @property
+1683    def named_selects(self):
+1684        return self.this.unnest().named_selects
+1685
+1686    @property
+1687    def selects(self):
+1688        return self.this.unnest().selects
+1689
+1690    @property
+1691    def left(self):
+1692        return self.this
+1693
+1694    @property
+1695    def right(self):
+1696        return self.expression
+
+ + + + +
+ +
+ + def + limit( self, expression, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1656    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1657        """
+1658        Set the LIMIT expression.
+1659
+1660        Example:
+1661            >>> select("1").union(select("1")).limit(1).sql()
+1662            'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
+1663
+1664        Args:
+1665            expression (str | int | Expression): the SQL code string to parse.
+1666                This can also be an integer.
+1667                If a `Limit` instance is passed, this is used as-is.
+1668                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
+1669            dialect (str): the dialect used to parse the input expression.
+1670            copy (bool): if `False`, modify this expression instance in-place.
+1671            opts (kwargs): other options to use to parse the input expressions.
+1672
+1673        Returns:
+1674            Select: The limited subqueryable.
+1675        """
+1676        return (
+1677            select("*")
+1678            .from_(self.subquery(alias="_l_0", copy=copy))
+1679            .limit(expression, dialect=dialect, copy=False, **opts)
+1680        )
+
+ + +

Set the LIMIT expression.

+ +
Example:
+ +
+
+
>>> select("1").union(select("1")).limit(1).sql()
+'SELECT * FROM (SELECT 1 UNION SELECT 1) AS _l_0 LIMIT 1'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | int | Expression): the SQL code string to parse. +This can also be an integer. +If a Limit instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Limit.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: The limited subqueryable.

+
+
+ + +
+ +
+
+ +
+ + class + Except(Union): + + + +
+ +
1699class Except(Union):
+1700    pass
+
+ + + + + +
+
+ +
+ + class + Intersect(Union): + + + +
+ +
1703class Intersect(Union):
+1704    pass
+
+ + + + + +
+
+ +
+ + class + Unnest(UDTF): + + + +
+ +
1707class Unnest(UDTF):
+1708    arg_types = {
+1709        "expressions": True,
+1710        "ordinality": False,
+1711        "alias": False,
+1712        "offset": False,
+1713    }
+
+ + + + + +
+
+ +
+ + class + Update(Expression): + + + +
+ +
1716class Update(Expression):
+1717    arg_types = {
+1718        "with": False,
+1719        "this": False,
+1720        "expressions": True,
+1721        "from": False,
+1722        "where": False,
+1723    }
+
+ + + + + +
+
+ +
+ + class + Values(UDTF): + + + +
+ +
1726class Values(UDTF):
+1727    arg_types = {
+1728        "expressions": True,
+1729        "ordinality": False,
+1730        "alias": False,
+1731    }
+
+ + + + + +
+
+ +
+ + class + Var(Expression): + + + +
+ +
1734class Var(Expression):
+1735    pass
+
+ + + + + +
+
+ +
+ + class + Schema(Expression): + + + +
+ +
1738class Schema(Expression):
+1739    arg_types = {"this": False, "expressions": False}
+
+ + + + + +
+
+ +
+ + class + Lock(Expression): + + + +
+ +
1744class Lock(Expression):
+1745    arg_types = {"update": True}
+
+ + + + + +
+
+ +
+ + class + Select(Subqueryable): + + + +
+ +
1748class Select(Subqueryable):
+1749    arg_types = {
+1750        "with": False,
+1751        "expressions": False,
+1752        "hint": False,
+1753        "distinct": False,
+1754        "into": False,
+1755        "from": False,
+1756        **QUERY_MODIFIERS,
+1757    }
+1758
+1759    def from_(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1760        """
+1761        Set the FROM expression.
+1762
+1763        Example:
+1764            >>> Select().from_("tbl").select("x").sql()
+1765            'SELECT x FROM tbl'
+1766
+1767        Args:
+1768            *expressions (str | Expression): the SQL code strings to parse.
+1769                If a `From` instance is passed, this is used as-is.
+1770                If another `Expression` instance is passed, it will be wrapped in a `From`.
+1771            append (bool): if `True`, add to any existing expressions.
+1772                Otherwise, this flattens all the `From` expression into a single expression.
+1773            dialect (str): the dialect used to parse the input expression.
+1774            copy (bool): if `False`, modify this expression instance in-place.
+1775            opts (kwargs): other options to use to parse the input expressions.
+1776
+1777        Returns:
+1778            Select: the modified expression.
+1779        """
+1780        return _apply_child_list_builder(
+1781            *expressions,
+1782            instance=self,
+1783            arg="from",
+1784            append=append,
+1785            copy=copy,
+1786            prefix="FROM",
+1787            into=From,
+1788            dialect=dialect,
+1789            **opts,
+1790        )
+1791
+1792    def group_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1793        """
+1794        Set the GROUP BY expression.
+1795
+1796        Example:
+1797            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
+1798            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
+1799
+1800        Args:
+1801            *expressions (str | Expression): the SQL code strings to parse.
+1802                If a `Group` instance is passed, this is used as-is.
+1803                If another `Expression` instance is passed, it will be wrapped in a `Group`.
+1804                If nothing is passed in then a group by is not applied to the expression
+1805            append (bool): if `True`, add to any existing expressions.
+1806                Otherwise, this flattens all the `Group` expression into a single expression.
+1807            dialect (str): the dialect used to parse the input expression.
+1808            copy (bool): if `False`, modify this expression instance in-place.
+1809            opts (kwargs): other options to use to parse the input expressions.
+1810
+1811        Returns:
+1812            Select: the modified expression.
+1813        """
+1814        if not expressions:
+1815            return self if not copy else self.copy()
+1816        return _apply_child_list_builder(
+1817            *expressions,
+1818            instance=self,
+1819            arg="group",
+1820            append=append,
+1821            copy=copy,
+1822            prefix="GROUP BY",
+1823            into=Group,
+1824            dialect=dialect,
+1825            **opts,
+1826        )
+1827
+1828    def order_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1829        """
+1830        Set the ORDER BY expression.
+1831
+1832        Example:
+1833            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
+1834            'SELECT x FROM tbl ORDER BY x DESC'
+1835
+1836        Args:
+1837            *expressions (str | Expression): the SQL code strings to parse.
+1838                If a `Group` instance is passed, this is used as-is.
+1839                If another `Expression` instance is passed, it will be wrapped in a `Order`.
+1840            append (bool): if `True`, add to any existing expressions.
+1841                Otherwise, this flattens all the `Order` expression into a single expression.
+1842            dialect (str): the dialect used to parse the input expression.
+1843            copy (bool): if `False`, modify this expression instance in-place.
+1844            opts (kwargs): other options to use to parse the input expressions.
+1845
+1846        Returns:
+1847            Select: the modified expression.
+1848        """
+1849        return _apply_child_list_builder(
+1850            *expressions,
+1851            instance=self,
+1852            arg="order",
+1853            append=append,
+1854            copy=copy,
+1855            prefix="ORDER BY",
+1856            into=Order,
+1857            dialect=dialect,
+1858            **opts,
+1859        )
+1860
+1861    def sort_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1862        """
+1863        Set the SORT BY expression.
+1864
+1865        Example:
+1866            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql()
+1867            'SELECT x FROM tbl SORT BY x DESC'
+1868
+1869        Args:
+1870            *expressions (str | Expression): the SQL code strings to parse.
+1871                If a `Group` instance is passed, this is used as-is.
+1872                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
+1873            append (bool): if `True`, add to any existing expressions.
+1874                Otherwise, this flattens all the `Order` expression into a single expression.
+1875            dialect (str): the dialect used to parse the input expression.
+1876            copy (bool): if `False`, modify this expression instance in-place.
+1877            opts (kwargs): other options to use to parse the input expressions.
+1878
+1879        Returns:
+1880            Select: the modified expression.
+1881        """
+1882        return _apply_child_list_builder(
+1883            *expressions,
+1884            instance=self,
+1885            arg="sort",
+1886            append=append,
+1887            copy=copy,
+1888            prefix="SORT BY",
+1889            into=Sort,
+1890            dialect=dialect,
+1891            **opts,
+1892        )
+1893
+1894    def cluster_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1895        """
+1896        Set the CLUSTER BY expression.
+1897
+1898        Example:
+1899            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql()
+1900            'SELECT x FROM tbl CLUSTER BY x DESC'
+1901
+1902        Args:
+1903            *expressions (str | Expression): the SQL code strings to parse.
+1904                If a `Group` instance is passed, this is used as-is.
+1905                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
+1906            append (bool): if `True`, add to any existing expressions.
+1907                Otherwise, this flattens all the `Order` expression into a single expression.
+1908            dialect (str): the dialect used to parse the input expression.
+1909            copy (bool): if `False`, modify this expression instance in-place.
+1910            opts (kwargs): other options to use to parse the input expressions.
+1911
+1912        Returns:
+1913            Select: the modified expression.
+1914        """
+1915        return _apply_child_list_builder(
+1916            *expressions,
+1917            instance=self,
+1918            arg="cluster",
+1919            append=append,
+1920            copy=copy,
+1921            prefix="CLUSTER BY",
+1922            into=Cluster,
+1923            dialect=dialect,
+1924            **opts,
+1925        )
+1926
+1927    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1928        """
+1929        Set the LIMIT expression.
+1930
+1931        Example:
+1932            >>> Select().from_("tbl").select("x").limit(10).sql()
+1933            'SELECT x FROM tbl LIMIT 10'
+1934
+1935        Args:
+1936            expression (str | int | Expression): the SQL code string to parse.
+1937                This can also be an integer.
+1938                If a `Limit` instance is passed, this is used as-is.
+1939                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
+1940            dialect (str): the dialect used to parse the input expression.
+1941            copy (bool): if `False`, modify this expression instance in-place.
+1942            opts (kwargs): other options to use to parse the input expressions.
+1943
+1944        Returns:
+1945            Select: the modified expression.
+1946        """
+1947        return _apply_builder(
+1948            expression=expression,
+1949            instance=self,
+1950            arg="limit",
+1951            into=Limit,
+1952            prefix="LIMIT",
+1953            dialect=dialect,
+1954            copy=copy,
+1955            **opts,
+1956        )
+1957
+1958    def offset(self, expression, dialect=None, copy=True, **opts) -> Select:
+1959        """
+1960        Set the OFFSET expression.
+1961
+1962        Example:
+1963            >>> Select().from_("tbl").select("x").offset(10).sql()
+1964            'SELECT x FROM tbl OFFSET 10'
+1965
+1966        Args:
+1967            expression (str | int | Expression): the SQL code string to parse.
+1968                This can also be an integer.
+1969                If a `Offset` instance is passed, this is used as-is.
+1970                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
+1971            dialect (str): the dialect used to parse the input expression.
+1972            copy (bool): if `False`, modify this expression instance in-place.
+1973            opts (kwargs): other options to use to parse the input expressions.
+1974
+1975        Returns:
+1976            Select: the modified expression.
+1977        """
+1978        return _apply_builder(
+1979            expression=expression,
+1980            instance=self,
+1981            arg="offset",
+1982            into=Offset,
+1983            prefix="OFFSET",
+1984            dialect=dialect,
+1985            copy=copy,
+1986            **opts,
+1987        )
+1988
+1989    def select(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1990        """
+1991        Append to or set the SELECT expressions.
+1992
+1993        Example:
+1994            >>> Select().select("x", "y").sql()
+1995            'SELECT x, y'
+1996
+1997        Args:
+1998            *expressions (str | Expression): the SQL code strings to parse.
+1999                If an `Expression` instance is passed, it will be used as-is.
+2000            append (bool): if `True`, add to any existing expressions.
+2001                Otherwise, this resets the expressions.
+2002            dialect (str): the dialect used to parse the input expressions.
+2003            copy (bool): if `False`, modify this expression instance in-place.
+2004            opts (kwargs): other options to use to parse the input expressions.
+2005
+2006        Returns:
+2007            Select: the modified expression.
+2008        """
+2009        return _apply_list_builder(
+2010            *expressions,
+2011            instance=self,
+2012            arg="expressions",
+2013            append=append,
+2014            dialect=dialect,
+2015            copy=copy,
+2016            **opts,
+2017        )
+2018
+2019    def lateral(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2020        """
+2021        Append to or set the LATERAL expressions.
+2022
+2023        Example:
+2024            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
+2025            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
+2026
+2027        Args:
+2028            *expressions (str | Expression): the SQL code strings to parse.
+2029                If an `Expression` instance is passed, it will be used as-is.
+2030            append (bool): if `True`, add to any existing expressions.
+2031                Otherwise, this resets the expressions.
+2032            dialect (str): the dialect used to parse the input expressions.
+2033            copy (bool): if `False`, modify this expression instance in-place.
+2034            opts (kwargs): other options to use to parse the input expressions.
+2035
+2036        Returns:
+2037            Select: the modified expression.
+2038        """
+2039        return _apply_list_builder(
+2040            *expressions,
+2041            instance=self,
+2042            arg="laterals",
+2043            append=append,
+2044            into=Lateral,
+2045            prefix="LATERAL VIEW",
+2046            dialect=dialect,
+2047            copy=copy,
+2048            **opts,
+2049        )
+2050
+2051    def join(
+2052        self,
+2053        expression,
+2054        on=None,
+2055        using=None,
+2056        append=True,
+2057        join_type=None,
+2058        join_alias=None,
+2059        dialect=None,
+2060        copy=True,
+2061        **opts,
+2062    ) -> Select:
+2063        """
+2064        Append to or set the JOIN expressions.
+2065
+2066        Example:
+2067            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
+2068            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
+2069
+2070            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
+2071            'SELECT 1 FROM a JOIN b USING (x, y, z)'
+2072
+2073            Use `join_type` to change the type of join:
+2074
+2075            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
+2076            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
+2077
+2078        Args:
+2079            expression (str | Expression): the SQL code string to parse.
+2080                If an `Expression` instance is passed, it will be used as-is.
+2081            on (str | Expression): optionally specify the join "on" criteria as a SQL string.
+2082                If an `Expression` instance is passed, it will be used as-is.
+2083            using (str | Expression): optionally specify the join "using" criteria as a SQL string.
+2084                If an `Expression` instance is passed, it will be used as-is.
+2085            append (bool): if `True`, add to any existing expressions.
+2086                Otherwise, this resets the expressions.
+2087            join_type (str): If set, alter the parsed join type
+2088            dialect (str): the dialect used to parse the input expressions.
+2089            copy (bool): if `False`, modify this expression instance in-place.
+2090            opts (kwargs): other options to use to parse the input expressions.
+2091
+2092        Returns:
+2093            Select: the modified expression.
+2094        """
+2095        parse_args = {"dialect": dialect, **opts}
+2096
+2097        try:
+2098            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
+2099        except ParseError:
+2100            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
+2101
+2102        join = expression if isinstance(expression, Join) else Join(this=expression)
+2103
+2104        if isinstance(join.this, Select):
+2105            join.this.replace(join.this.subquery())
+2106
+2107        if join_type:
+2108            natural: t.Optional[Token]
+2109            side: t.Optional[Token]
+2110            kind: t.Optional[Token]
+2111
+2112            natural, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
+2113
+2114            if natural:
+2115                join.set("natural", True)
+2116            if side:
+2117                join.set("side", side.text)
+2118            if kind:
+2119                join.set("kind", kind.text)
+2120
+2121        if on:
+2122            on = and_(*ensure_collection(on), dialect=dialect, **opts)
+2123            join.set("on", on)
+2124
+2125        if using:
+2126            join = _apply_list_builder(
+2127                *ensure_collection(using),
+2128                instance=join,
+2129                arg="using",
+2130                append=append,
+2131                copy=copy,
+2132                **opts,
+2133            )
+2134
+2135        if join_alias:
+2136            join.set("this", alias_(join.this, join_alias, table=True))
+2137        return _apply_list_builder(
+2138            join,
+2139            instance=self,
+2140            arg="joins",
+2141            append=append,
+2142            copy=copy,
+2143            **opts,
+2144        )
+2145
+2146    def where(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2147        """
+2148        Append to or set the WHERE expressions.
+2149
+2150        Example:
+2151            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
+2152            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
+2153
+2154        Args:
+2155            *expressions (str | Expression): the SQL code strings to parse.
+2156                If an `Expression` instance is passed, it will be used as-is.
+2157                Multiple expressions are combined with an AND operator.
+2158            append (bool): if `True`, AND the new expressions to any existing expression.
+2159                Otherwise, this resets the expression.
+2160            dialect (str): the dialect used to parse the input expressions.
+2161            copy (bool): if `False`, modify this expression instance in-place.
+2162            opts (kwargs): other options to use to parse the input expressions.
+2163
+2164        Returns:
+2165            Select: the modified expression.
+2166        """
+2167        return _apply_conjunction_builder(
+2168            *expressions,
+2169            instance=self,
+2170            arg="where",
+2171            append=append,
+2172            into=Where,
+2173            dialect=dialect,
+2174            copy=copy,
+2175            **opts,
+2176        )
+2177
+2178    def having(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2179        """
+2180        Append to or set the HAVING expressions.
+2181
+2182        Example:
+2183            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
+2184            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
+2185
+2186        Args:
+2187            *expressions (str | Expression): the SQL code strings to parse.
+2188                If an `Expression` instance is passed, it will be used as-is.
+2189                Multiple expressions are combined with an AND operator.
+2190            append (bool): if `True`, AND the new expressions to any existing expression.
+2191                Otherwise, this resets the expression.
+2192            dialect (str): the dialect used to parse the input expressions.
+2193            copy (bool): if `False`, modify this expression instance in-place.
+2194            opts (kwargs): other options to use to parse the input expressions.
+2195
+2196        Returns:
+2197            Select: the modified expression.
+2198        """
+2199        return _apply_conjunction_builder(
+2200            *expressions,
+2201            instance=self,
+2202            arg="having",
+2203            append=append,
+2204            into=Having,
+2205            dialect=dialect,
+2206            copy=copy,
+2207            **opts,
+2208        )
+2209
+2210    def window(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2211        return _apply_list_builder(
+2212            *expressions,
+2213            instance=self,
+2214            arg="windows",
+2215            append=append,
+2216            into=Window,
+2217            dialect=dialect,
+2218            copy=copy,
+2219            **opts,
+2220        )
+2221
+2222    def distinct(self, distinct=True, copy=True) -> Select:
+2223        """
+2224        Set the OFFSET expression.
+2225
+2226        Example:
+2227            >>> Select().from_("tbl").select("x").distinct().sql()
+2228            'SELECT DISTINCT x FROM tbl'
+2229
+2230        Args:
+2231            distinct (bool): whether the Select should be distinct
+2232            copy (bool): if `False`, modify this expression instance in-place.
+2233
+2234        Returns:
+2235            Select: the modified expression.
+2236        """
+2237        instance = _maybe_copy(self, copy)
+2238        instance.set("distinct", Distinct() if distinct else None)
+2239        return instance
+2240
+2241    def ctas(self, table, properties=None, dialect=None, copy=True, **opts) -> Create:
+2242        """
+2243        Convert this expression to a CREATE TABLE AS statement.
+2244
+2245        Example:
+2246            >>> Select().select("*").from_("tbl").ctas("x").sql()
+2247            'CREATE TABLE x AS SELECT * FROM tbl'
+2248
+2249        Args:
+2250            table (str | Expression): the SQL code string to parse as the table name.
+2251                If another `Expression` instance is passed, it will be used as-is.
+2252            properties (dict): an optional mapping of table properties
+2253            dialect (str): the dialect used to parse the input table.
+2254            copy (bool): if `False`, modify this expression instance in-place.
+2255            opts (kwargs): other options to use to parse the input table.
+2256
+2257        Returns:
+2258            Create: the CREATE TABLE AS expression
+2259        """
+2260        instance = _maybe_copy(self, copy)
+2261        table_expression = maybe_parse(
+2262            table,
+2263            into=Table,
+2264            dialect=dialect,
+2265            **opts,
+2266        )
+2267        properties_expression = None
+2268        if properties:
+2269            properties_expression = Properties.from_dict(properties)
+2270
+2271        return Create(
+2272            this=table_expression,
+2273            kind="table",
+2274            expression=instance,
+2275            properties=properties_expression,
+2276        )
+2277
+2278    def lock(self, update: bool = True, copy: bool = True) -> Select:
+2279        """
+2280        Set the locking read mode for this expression.
+2281
+2282        Examples:
+2283            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
+2284            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
+2285
+2286            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
+2287            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
+2288
+2289        Args:
+2290            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
+2291            copy: if `False`, modify this expression instance in-place.
+2292
+2293        Returns:
+2294            The modified expression.
+2295        """
+2296
+2297        inst = _maybe_copy(self, copy)
+2298        inst.set("lock", Lock(update=update))
+2299
+2300        return inst
+2301
+2302    @property
+2303    def named_selects(self) -> t.List[str]:
+2304        return [e.output_name for e in self.expressions if e.alias_or_name]
+2305
+2306    @property
+2307    def selects(self) -> t.List[Expression]:
+2308        return self.expressions
+
+ + + + +
+ +
+ + def + from_( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1759    def from_(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1760        """
+1761        Set the FROM expression.
+1762
+1763        Example:
+1764            >>> Select().from_("tbl").select("x").sql()
+1765            'SELECT x FROM tbl'
+1766
+1767        Args:
+1768            *expressions (str | Expression): the SQL code strings to parse.
+1769                If a `From` instance is passed, this is used as-is.
+1770                If another `Expression` instance is passed, it will be wrapped in a `From`.
+1771            append (bool): if `True`, add to any existing expressions.
+1772                Otherwise, this flattens all the `From` expression into a single expression.
+1773            dialect (str): the dialect used to parse the input expression.
+1774            copy (bool): if `False`, modify this expression instance in-place.
+1775            opts (kwargs): other options to use to parse the input expressions.
+1776
+1777        Returns:
+1778            Select: the modified expression.
+1779        """
+1780        return _apply_child_list_builder(
+1781            *expressions,
+1782            instance=self,
+1783            arg="from",
+1784            append=append,
+1785            copy=copy,
+1786            prefix="FROM",
+1787            into=From,
+1788            dialect=dialect,
+1789            **opts,
+1790        )
+
+ + +

Set the FROM expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").sql()
+'SELECT x FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If a From instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a From.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this flattens all the From expression into a single expression.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + group_by( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1792    def group_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1793        """
+1794        Set the GROUP BY expression.
+1795
+1796        Example:
+1797            >>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
+1798            'SELECT x, COUNT(1) FROM tbl GROUP BY x'
+1799
+1800        Args:
+1801            *expressions (str | Expression): the SQL code strings to parse.
+1802                If a `Group` instance is passed, this is used as-is.
+1803                If another `Expression` instance is passed, it will be wrapped in a `Group`.
+1804                If nothing is passed in then a group by is not applied to the expression
+1805            append (bool): if `True`, add to any existing expressions.
+1806                Otherwise, this flattens all the `Group` expression into a single expression.
+1807            dialect (str): the dialect used to parse the input expression.
+1808            copy (bool): if `False`, modify this expression instance in-place.
+1809            opts (kwargs): other options to use to parse the input expressions.
+1810
+1811        Returns:
+1812            Select: the modified expression.
+1813        """
+1814        if not expressions:
+1815            return self if not copy else self.copy()
+1816        return _apply_child_list_builder(
+1817            *expressions,
+1818            instance=self,
+1819            arg="group",
+1820            append=append,
+1821            copy=copy,
+1822            prefix="GROUP BY",
+1823            into=Group,
+1824            dialect=dialect,
+1825            **opts,
+1826        )
+
+ + +

Set the GROUP BY expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x", "COUNT(1)").group_by("x").sql()
+'SELECT x, COUNT(1) FROM tbl GROUP BY x'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If a Group instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Group. +If nothing is passed in then a group by is not applied to the expression
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this flattens all the Group expression into a single expression.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + order_by( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1828    def order_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1829        """
+1830        Set the ORDER BY expression.
+1831
+1832        Example:
+1833            >>> Select().from_("tbl").select("x").order_by("x DESC").sql()
+1834            'SELECT x FROM tbl ORDER BY x DESC'
+1835
+1836        Args:
+1837            *expressions (str | Expression): the SQL code strings to parse.
+1838                If a `Group` instance is passed, this is used as-is.
+1839                If another `Expression` instance is passed, it will be wrapped in a `Order`.
+1840            append (bool): if `True`, add to any existing expressions.
+1841                Otherwise, this flattens all the `Order` expression into a single expression.
+1842            dialect (str): the dialect used to parse the input expression.
+1843            copy (bool): if `False`, modify this expression instance in-place.
+1844            opts (kwargs): other options to use to parse the input expressions.
+1845
+1846        Returns:
+1847            Select: the modified expression.
+1848        """
+1849        return _apply_child_list_builder(
+1850            *expressions,
+1851            instance=self,
+1852            arg="order",
+1853            append=append,
+1854            copy=copy,
+1855            prefix="ORDER BY",
+1856            into=Order,
+1857            dialect=dialect,
+1858            **opts,
+1859        )
+
+ + +

Set the ORDER BY expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").order_by("x DESC").sql()
+'SELECT x FROM tbl ORDER BY x DESC'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If a Group instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Order.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this flattens all the Order expression into a single expression.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + sort_by( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1861    def sort_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1862        """
+1863        Set the SORT BY expression.
+1864
+1865        Example:
+1866            >>> Select().from_("tbl").select("x").sort_by("x DESC").sql()
+1867            'SELECT x FROM tbl SORT BY x DESC'
+1868
+1869        Args:
+1870            *expressions (str | Expression): the SQL code strings to parse.
+1871                If a `Group` instance is passed, this is used as-is.
+1872                If another `Expression` instance is passed, it will be wrapped in a `SORT`.
+1873            append (bool): if `True`, add to any existing expressions.
+1874                Otherwise, this flattens all the `Order` expression into a single expression.
+1875            dialect (str): the dialect used to parse the input expression.
+1876            copy (bool): if `False`, modify this expression instance in-place.
+1877            opts (kwargs): other options to use to parse the input expressions.
+1878
+1879        Returns:
+1880            Select: the modified expression.
+1881        """
+1882        return _apply_child_list_builder(
+1883            *expressions,
+1884            instance=self,
+1885            arg="sort",
+1886            append=append,
+1887            copy=copy,
+1888            prefix="SORT BY",
+1889            into=Sort,
+1890            dialect=dialect,
+1891            **opts,
+1892        )
+
+ + +

Set the SORT BY expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").sort_by("x DESC").sql()
+'SELECT x FROM tbl SORT BY x DESC'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If a Group instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a SORT.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this flattens all the Order expression into a single expression.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + cluster_by( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1894    def cluster_by(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1895        """
+1896        Set the CLUSTER BY expression.
+1897
+1898        Example:
+1899            >>> Select().from_("tbl").select("x").cluster_by("x DESC").sql()
+1900            'SELECT x FROM tbl CLUSTER BY x DESC'
+1901
+1902        Args:
+1903            *expressions (str | Expression): the SQL code strings to parse.
+1904                If a `Group` instance is passed, this is used as-is.
+1905                If another `Expression` instance is passed, it will be wrapped in a `Cluster`.
+1906            append (bool): if `True`, add to any existing expressions.
+1907                Otherwise, this flattens all the `Order` expression into a single expression.
+1908            dialect (str): the dialect used to parse the input expression.
+1909            copy (bool): if `False`, modify this expression instance in-place.
+1910            opts (kwargs): other options to use to parse the input expressions.
+1911
+1912        Returns:
+1913            Select: the modified expression.
+1914        """
+1915        return _apply_child_list_builder(
+1916            *expressions,
+1917            instance=self,
+1918            arg="cluster",
+1919            append=append,
+1920            copy=copy,
+1921            prefix="CLUSTER BY",
+1922            into=Cluster,
+1923            dialect=dialect,
+1924            **opts,
+1925        )
+
+ + +

Set the CLUSTER BY expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").cluster_by("x DESC").sql()
+'SELECT x FROM tbl CLUSTER BY x DESC'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If a Group instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Cluster.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this flattens all the Order expression into a single expression.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + limit( self, expression, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1927    def limit(self, expression, dialect=None, copy=True, **opts) -> Select:
+1928        """
+1929        Set the LIMIT expression.
+1930
+1931        Example:
+1932            >>> Select().from_("tbl").select("x").limit(10).sql()
+1933            'SELECT x FROM tbl LIMIT 10'
+1934
+1935        Args:
+1936            expression (str | int | Expression): the SQL code string to parse.
+1937                This can also be an integer.
+1938                If a `Limit` instance is passed, this is used as-is.
+1939                If another `Expression` instance is passed, it will be wrapped in a `Limit`.
+1940            dialect (str): the dialect used to parse the input expression.
+1941            copy (bool): if `False`, modify this expression instance in-place.
+1942            opts (kwargs): other options to use to parse the input expressions.
+1943
+1944        Returns:
+1945            Select: the modified expression.
+1946        """
+1947        return _apply_builder(
+1948            expression=expression,
+1949            instance=self,
+1950            arg="limit",
+1951            into=Limit,
+1952            prefix="LIMIT",
+1953            dialect=dialect,
+1954            copy=copy,
+1955            **opts,
+1956        )
+
+ + +

Set the LIMIT expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").limit(10).sql()
+'SELECT x FROM tbl LIMIT 10'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | int | Expression): the SQL code string to parse. +This can also be an integer. +If a Limit instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Limit.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + offset( self, expression, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1958    def offset(self, expression, dialect=None, copy=True, **opts) -> Select:
+1959        """
+1960        Set the OFFSET expression.
+1961
+1962        Example:
+1963            >>> Select().from_("tbl").select("x").offset(10).sql()
+1964            'SELECT x FROM tbl OFFSET 10'
+1965
+1966        Args:
+1967            expression (str | int | Expression): the SQL code string to parse.
+1968                This can also be an integer.
+1969                If a `Offset` instance is passed, this is used as-is.
+1970                If another `Expression` instance is passed, it will be wrapped in a `Offset`.
+1971            dialect (str): the dialect used to parse the input expression.
+1972            copy (bool): if `False`, modify this expression instance in-place.
+1973            opts (kwargs): other options to use to parse the input expressions.
+1974
+1975        Returns:
+1976            Select: the modified expression.
+1977        """
+1978        return _apply_builder(
+1979            expression=expression,
+1980            instance=self,
+1981            arg="offset",
+1982            into=Offset,
+1983            prefix="OFFSET",
+1984            dialect=dialect,
+1985            copy=copy,
+1986            **opts,
+1987        )
+
+ + +

Set the OFFSET expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").offset(10).sql()
+'SELECT x FROM tbl OFFSET 10'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | int | Expression): the SQL code string to parse. +This can also be an integer. +If a Offset instance is passed, this is used as-is. +If another Expression instance is passed, it will be wrapped in a Offset.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + select( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
1989    def select(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+1990        """
+1991        Append to or set the SELECT expressions.
+1992
+1993        Example:
+1994            >>> Select().select("x", "y").sql()
+1995            'SELECT x, y'
+1996
+1997        Args:
+1998            *expressions (str | Expression): the SQL code strings to parse.
+1999                If an `Expression` instance is passed, it will be used as-is.
+2000            append (bool): if `True`, add to any existing expressions.
+2001                Otherwise, this resets the expressions.
+2002            dialect (str): the dialect used to parse the input expressions.
+2003            copy (bool): if `False`, modify this expression instance in-place.
+2004            opts (kwargs): other options to use to parse the input expressions.
+2005
+2006        Returns:
+2007            Select: the modified expression.
+2008        """
+2009        return _apply_list_builder(
+2010            *expressions,
+2011            instance=self,
+2012            arg="expressions",
+2013            append=append,
+2014            dialect=dialect,
+2015            copy=copy,
+2016            **opts,
+2017        )
+
+ + +

Append to or set the SELECT expressions.

+ +
Example:
+ +
+
+
>>> Select().select("x", "y").sql()
+'SELECT x, y'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this resets the expressions.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + lateral( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
2019    def lateral(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2020        """
+2021        Append to or set the LATERAL expressions.
+2022
+2023        Example:
+2024            >>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
+2025            'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
+2026
+2027        Args:
+2028            *expressions (str | Expression): the SQL code strings to parse.
+2029                If an `Expression` instance is passed, it will be used as-is.
+2030            append (bool): if `True`, add to any existing expressions.
+2031                Otherwise, this resets the expressions.
+2032            dialect (str): the dialect used to parse the input expressions.
+2033            copy (bool): if `False`, modify this expression instance in-place.
+2034            opts (kwargs): other options to use to parse the input expressions.
+2035
+2036        Returns:
+2037            Select: the modified expression.
+2038        """
+2039        return _apply_list_builder(
+2040            *expressions,
+2041            instance=self,
+2042            arg="laterals",
+2043            append=append,
+2044            into=Lateral,
+2045            prefix="LATERAL VIEW",
+2046            dialect=dialect,
+2047            copy=copy,
+2048            **opts,
+2049        )
+
+ + +

Append to or set the LATERAL expressions.

+ +
Example:
+ +
+
+
>>> Select().select("x").lateral("OUTER explode(y) tbl2 AS z").from_("tbl").sql()
+'SELECT x FROM tbl LATERAL VIEW OUTER EXPLODE(y) tbl2 AS z'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this resets the expressions.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + join( self, expression, on=None, using=None, append=True, join_type=None, join_alias=None, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
2051    def join(
+2052        self,
+2053        expression,
+2054        on=None,
+2055        using=None,
+2056        append=True,
+2057        join_type=None,
+2058        join_alias=None,
+2059        dialect=None,
+2060        copy=True,
+2061        **opts,
+2062    ) -> Select:
+2063        """
+2064        Append to or set the JOIN expressions.
+2065
+2066        Example:
+2067            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
+2068            'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
+2069
+2070            >>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
+2071            'SELECT 1 FROM a JOIN b USING (x, y, z)'
+2072
+2073            Use `join_type` to change the type of join:
+2074
+2075            >>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
+2076            'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
+2077
+2078        Args:
+2079            expression (str | Expression): the SQL code string to parse.
+2080                If an `Expression` instance is passed, it will be used as-is.
+2081            on (str | Expression): optionally specify the join "on" criteria as a SQL string.
+2082                If an `Expression` instance is passed, it will be used as-is.
+2083            using (str | Expression): optionally specify the join "using" criteria as a SQL string.
+2084                If an `Expression` instance is passed, it will be used as-is.
+2085            append (bool): if `True`, add to any existing expressions.
+2086                Otherwise, this resets the expressions.
+2087            join_type (str): If set, alter the parsed join type
+2088            dialect (str): the dialect used to parse the input expressions.
+2089            copy (bool): if `False`, modify this expression instance in-place.
+2090            opts (kwargs): other options to use to parse the input expressions.
+2091
+2092        Returns:
+2093            Select: the modified expression.
+2094        """
+2095        parse_args = {"dialect": dialect, **opts}
+2096
+2097        try:
+2098            expression = maybe_parse(expression, into=Join, prefix="JOIN", **parse_args)
+2099        except ParseError:
+2100            expression = maybe_parse(expression, into=(Join, Expression), **parse_args)
+2101
+2102        join = expression if isinstance(expression, Join) else Join(this=expression)
+2103
+2104        if isinstance(join.this, Select):
+2105            join.this.replace(join.this.subquery())
+2106
+2107        if join_type:
+2108            natural: t.Optional[Token]
+2109            side: t.Optional[Token]
+2110            kind: t.Optional[Token]
+2111
+2112            natural, side, kind = maybe_parse(join_type, into="JOIN_TYPE", **parse_args)  # type: ignore
+2113
+2114            if natural:
+2115                join.set("natural", True)
+2116            if side:
+2117                join.set("side", side.text)
+2118            if kind:
+2119                join.set("kind", kind.text)
+2120
+2121        if on:
+2122            on = and_(*ensure_collection(on), dialect=dialect, **opts)
+2123            join.set("on", on)
+2124
+2125        if using:
+2126            join = _apply_list_builder(
+2127                *ensure_collection(using),
+2128                instance=join,
+2129                arg="using",
+2130                append=append,
+2131                copy=copy,
+2132                **opts,
+2133            )
+2134
+2135        if join_alias:
+2136            join.set("this", alias_(join.this, join_alias, table=True))
+2137        return _apply_list_builder(
+2138            join,
+2139            instance=self,
+2140            arg="joins",
+2141            append=append,
+2142            copy=copy,
+2143            **opts,
+2144        )
+
+ + +

Append to or set the JOIN expressions.

+ +
Example:
+ +
+
+
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y").sql()
+'SELECT * FROM tbl JOIN tbl2 ON tbl1.y = tbl2.y'
+
+
+ +
+
>>> Select().select("1").from_("a").join("b", using=["x", "y", "z"]).sql()
+'SELECT 1 FROM a JOIN b USING (x, y, z)'
+
+
+ +

Use join_type to change the type of join:

+ +
+
>>> Select().select("*").from_("tbl").join("tbl2", on="tbl1.y = tbl2.y", join_type="left outer").sql()
+'SELECT * FROM tbl LEFT OUTER JOIN tbl2 ON tbl1.y = tbl2.y'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code string to parse. +If an Expression instance is passed, it will be used as-is.
  • +
  • on (str | Expression): optionally specify the join "on" criteria as a SQL string. +If an Expression instance is passed, it will be used as-is.
  • +
  • using (str | Expression): optionally specify the join "using" criteria as a SQL string. +If an Expression instance is passed, it will be used as-is.
  • +
  • append (bool): if True, add to any existing expressions. +Otherwise, this resets the expressions.
  • +
  • join_type (str): If set, alter the parsed join type
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + where( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
2146    def where(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2147        """
+2148        Append to or set the WHERE expressions.
+2149
+2150        Example:
+2151            >>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
+2152            "SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
+2153
+2154        Args:
+2155            *expressions (str | Expression): the SQL code strings to parse.
+2156                If an `Expression` instance is passed, it will be used as-is.
+2157                Multiple expressions are combined with an AND operator.
+2158            append (bool): if `True`, AND the new expressions to any existing expression.
+2159                Otherwise, this resets the expression.
+2160            dialect (str): the dialect used to parse the input expressions.
+2161            copy (bool): if `False`, modify this expression instance in-place.
+2162            opts (kwargs): other options to use to parse the input expressions.
+2163
+2164        Returns:
+2165            Select: the modified expression.
+2166        """
+2167        return _apply_conjunction_builder(
+2168            *expressions,
+2169            instance=self,
+2170            arg="where",
+2171            append=append,
+2172            into=Where,
+2173            dialect=dialect,
+2174            copy=copy,
+2175            **opts,
+2176        )
+
+ + +

Append to or set the WHERE expressions.

+ +
Example:
+ +
+
+
>>> Select().select("x").from_("tbl").where("x = 'a' OR x < 'b'").sql()
+"SELECT x FROM tbl WHERE x = 'a' OR x < 'b'"
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is. +Multiple expressions are combined with an AND operator.
  • +
  • append (bool): if True, AND the new expressions to any existing expression. +Otherwise, this resets the expression.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + having( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
2178    def having(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2179        """
+2180        Append to or set the HAVING expressions.
+2181
+2182        Example:
+2183            >>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
+2184            'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
+2185
+2186        Args:
+2187            *expressions (str | Expression): the SQL code strings to parse.
+2188                If an `Expression` instance is passed, it will be used as-is.
+2189                Multiple expressions are combined with an AND operator.
+2190            append (bool): if `True`, AND the new expressions to any existing expression.
+2191                Otherwise, this resets the expression.
+2192            dialect (str): the dialect used to parse the input expressions.
+2193            copy (bool): if `False`, modify this expression instance in-place.
+2194            opts (kwargs): other options to use to parse the input expressions.
+2195
+2196        Returns:
+2197            Select: the modified expression.
+2198        """
+2199        return _apply_conjunction_builder(
+2200            *expressions,
+2201            instance=self,
+2202            arg="having",
+2203            append=append,
+2204            into=Having,
+2205            dialect=dialect,
+2206            copy=copy,
+2207            **opts,
+2208        )
+
+ + +

Append to or set the HAVING expressions.

+ +
Example:
+ +
+
+
>>> Select().select("x", "COUNT(y)").from_("tbl").group_by("x").having("COUNT(y) > 3").sql()
+'SELECT x, COUNT(y) FROM tbl GROUP BY x HAVING COUNT(y) > 3'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, it will be used as-is. +Multiple expressions are combined with an AND operator.
  • +
  • append (bool): if True, AND the new expressions to any existing expression. +Otherwise, this resets the expression.
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + window( self, *expressions, append=True, dialect=None, copy=True, **opts) -> sqlglot.expressions.Select: + + + +
+ +
2210    def window(self, *expressions, append=True, dialect=None, copy=True, **opts) -> Select:
+2211        return _apply_list_builder(
+2212            *expressions,
+2213            instance=self,
+2214            arg="windows",
+2215            append=append,
+2216            into=Window,
+2217            dialect=dialect,
+2218            copy=copy,
+2219            **opts,
+2220        )
+
+ + + + +
+
+ +
+ + def + distinct(self, distinct=True, copy=True) -> sqlglot.expressions.Select: + + + +
+ +
2222    def distinct(self, distinct=True, copy=True) -> Select:
+2223        """
+2224        Set the OFFSET expression.
+2225
+2226        Example:
+2227            >>> Select().from_("tbl").select("x").distinct().sql()
+2228            'SELECT DISTINCT x FROM tbl'
+2229
+2230        Args:
+2231            distinct (bool): whether the Select should be distinct
+2232            copy (bool): if `False`, modify this expression instance in-place.
+2233
+2234        Returns:
+2235            Select: the modified expression.
+2236        """
+2237        instance = _maybe_copy(self, copy)
+2238        instance.set("distinct", Distinct() if distinct else None)
+2239        return instance
+
+ + +

Set the OFFSET expression.

+ +
Example:
+ +
+
+
>>> Select().from_("tbl").select("x").distinct().sql()
+'SELECT DISTINCT x FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • distinct (bool): whether the Select should be distinct
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
+ +
Returns:
+ +
+

Select: the modified expression.

+
+
+ + +
+
+ +
+ + def + ctas( self, table, properties=None, dialect=None, copy=True, **opts) -> sqlglot.expressions.Create: + + + +
+ +
2241    def ctas(self, table, properties=None, dialect=None, copy=True, **opts) -> Create:
+2242        """
+2243        Convert this expression to a CREATE TABLE AS statement.
+2244
+2245        Example:
+2246            >>> Select().select("*").from_("tbl").ctas("x").sql()
+2247            'CREATE TABLE x AS SELECT * FROM tbl'
+2248
+2249        Args:
+2250            table (str | Expression): the SQL code string to parse as the table name.
+2251                If another `Expression` instance is passed, it will be used as-is.
+2252            properties (dict): an optional mapping of table properties
+2253            dialect (str): the dialect used to parse the input table.
+2254            copy (bool): if `False`, modify this expression instance in-place.
+2255            opts (kwargs): other options to use to parse the input table.
+2256
+2257        Returns:
+2258            Create: the CREATE TABLE AS expression
+2259        """
+2260        instance = _maybe_copy(self, copy)
+2261        table_expression = maybe_parse(
+2262            table,
+2263            into=Table,
+2264            dialect=dialect,
+2265            **opts,
+2266        )
+2267        properties_expression = None
+2268        if properties:
+2269            properties_expression = Properties.from_dict(properties)
+2270
+2271        return Create(
+2272            this=table_expression,
+2273            kind="table",
+2274            expression=instance,
+2275            properties=properties_expression,
+2276        )
+
+ + +

Convert this expression to a CREATE TABLE AS statement.

+ +
Example:
+ +
+
+
>>> Select().select("*").from_("tbl").ctas("x").sql()
+'CREATE TABLE x AS SELECT * FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • table (str | Expression): the SQL code string to parse as the table name. +If another Expression instance is passed, it will be used as-is.
  • +
  • properties (dict): an optional mapping of table properties
  • +
  • dialect (str): the dialect used to parse the input table.
  • +
  • copy (bool): if False, modify this expression instance in-place.
  • +
  • opts (kwargs): other options to use to parse the input table.
  • +
+ +
Returns:
+ +
+

Create: the CREATE TABLE AS expression

+
+
+ + +
+
+ +
+ + def + lock( self, update: bool = True, copy: bool = True) -> sqlglot.expressions.Select: + + + +
+ +
2278    def lock(self, update: bool = True, copy: bool = True) -> Select:
+2279        """
+2280        Set the locking read mode for this expression.
+2281
+2282        Examples:
+2283            >>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
+2284            "SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
+2285
+2286            >>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
+2287            "SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
+2288
+2289        Args:
+2290            update: if `True`, the locking type will be `FOR UPDATE`, else it will be `FOR SHARE`.
+2291            copy: if `False`, modify this expression instance in-place.
+2292
+2293        Returns:
+2294            The modified expression.
+2295        """
+2296
+2297        inst = _maybe_copy(self, copy)
+2298        inst.set("lock", Lock(update=update))
+2299
+2300        return inst
+
+ + +

Set the locking read mode for this expression.

+ +
Examples:
+ +
+
+
>>> Select().select("x").from_("tbl").where("x = 'a'").lock().sql("mysql")
+"SELECT x FROM tbl WHERE x = 'a' FOR UPDATE"
+
+
+ +
+
>>> Select().select("x").from_("tbl").where("x = 'a'").lock(update=False).sql("mysql")
+"SELECT x FROM tbl WHERE x = 'a' FOR SHARE"
+
+
+
+ +
Arguments:
+ +
    +
  • update: if True, the locking type will be FOR UPDATE, else it will be FOR SHARE.
  • +
  • copy: if False, modify this expression instance in-place.
  • +
+ +
Returns:
+ +
+

The modified expression.

+
+
+ + +
+ +
+
+ +
+ + class + Subquery(DerivedTable, Unionable): + + + +
+ +
2311class Subquery(DerivedTable, Unionable):
+2312    arg_types = {
+2313        "this": True,
+2314        "alias": False,
+2315        "with": False,
+2316        **QUERY_MODIFIERS,
+2317    }
+2318
+2319    def unnest(self):
+2320        """
+2321        Returns the first non subquery.
+2322        """
+2323        expression = self
+2324        while isinstance(expression, Subquery):
+2325            expression = expression.this
+2326        return expression
+2327
+2328    @property
+2329    def output_name(self):
+2330        return self.alias
+
+ + + + +
+ +
+ + def + unnest(self): + + + +
+ +
2319    def unnest(self):
+2320        """
+2321        Returns the first non subquery.
+2322        """
+2323        expression = self
+2324        while isinstance(expression, Subquery):
+2325            expression = expression.this
+2326        return expression
+
+ + +

Returns the first non subquery.

+
+ + +
+
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + TableSample(Expression): + + + +
+ +
2333class TableSample(Expression):
+2334    arg_types = {
+2335        "this": False,
+2336        "method": False,
+2337        "bucket_numerator": False,
+2338        "bucket_denominator": False,
+2339        "bucket_field": False,
+2340        "percent": False,
+2341        "rows": False,
+2342        "size": False,
+2343        "seed": False,
+2344    }
+
+ + + + + +
+
+ +
+ + class + Tag(Expression): + + + +
+ +
2347class Tag(Expression):
+2348    """Tags are used for generating arbitrary sql like SELECT <span>x</span>."""
+2349
+2350    arg_types = {
+2351        "this": False,
+2352        "prefix": False,
+2353        "postfix": False,
+2354    }
+
+ + +

Tags are used for generating arbitrary sql like SELECT x.

+
+ + + +
+
+ +
+ + class + Pivot(Expression): + + + +
+ +
2357class Pivot(Expression):
+2358    arg_types = {
+2359        "this": False,
+2360        "expressions": True,
+2361        "field": True,
+2362        "unpivot": True,
+2363    }
+
+ + + + + +
+
+ +
+ + class + Window(Expression): + + + +
+ +
2366class Window(Expression):
+2367    arg_types = {
+2368        "this": True,
+2369        "partition_by": False,
+2370        "order": False,
+2371        "spec": False,
+2372        "alias": False,
+2373    }
+
+ + + + + +
+
+ +
+ + class + WindowSpec(Expression): + + + +
+ +
2376class WindowSpec(Expression):
+2377    arg_types = {
+2378        "kind": False,
+2379        "start": False,
+2380        "start_side": False,
+2381        "end": False,
+2382        "end_side": False,
+2383    }
+
+ + + + + +
+
+ +
+ + class + Where(Expression): + + + +
+ +
2386class Where(Expression):
+2387    pass
+
+ + + + + +
+
+ +
+ + class + Star(Expression): + + + +
+ +
2390class Star(Expression):
+2391    arg_types = {"except": False, "replace": False}
+2392
+2393    @property
+2394    def name(self) -> str:
+2395        return "*"
+2396
+2397    @property
+2398    def output_name(self):
+2399        return self.name
+
+ + + + +
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + Parameter(Expression): + + + +
+ +
2402class Parameter(Expression):
+2403    pass
+
+ + + + + +
+
+ +
+ + class + SessionParameter(Expression): + + + +
+ +
2406class SessionParameter(Expression):
+2407    arg_types = {"this": True, "kind": False}
+
+ + + + + +
+
+ +
+ + class + Placeholder(Expression): + + + +
+ +
2410class Placeholder(Expression):
+2411    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + Null(Condition): + + + +
+ +
2414class Null(Condition):
+2415    arg_types: t.Dict[str, t.Any] = {}
+2416
+2417    @property
+2418    def name(self) -> str:
+2419        return "NULL"
+
+ + + + + +
+
+ +
+ + class + Boolean(Condition): + + + +
+ +
2422class Boolean(Condition):
+2423    pass
+
+ + + + + +
+
+ +
+ + class + DataType(Expression): + + + +
+ +
2426class DataType(Expression):
+2427    arg_types = {
+2428        "this": True,
+2429        "expressions": False,
+2430        "nested": False,
+2431        "values": False,
+2432    }
+2433
+2434    class Type(AutoName):
+2435        CHAR = auto()
+2436        NCHAR = auto()
+2437        VARCHAR = auto()
+2438        NVARCHAR = auto()
+2439        TEXT = auto()
+2440        MEDIUMTEXT = auto()
+2441        LONGTEXT = auto()
+2442        MEDIUMBLOB = auto()
+2443        LONGBLOB = auto()
+2444        BINARY = auto()
+2445        VARBINARY = auto()
+2446        INT = auto()
+2447        TINYINT = auto()
+2448        SMALLINT = auto()
+2449        BIGINT = auto()
+2450        FLOAT = auto()
+2451        DOUBLE = auto()
+2452        DECIMAL = auto()
+2453        BOOLEAN = auto()
+2454        JSON = auto()
+2455        JSONB = auto()
+2456        INTERVAL = auto()
+2457        TIME = auto()
+2458        TIMESTAMP = auto()
+2459        TIMESTAMPTZ = auto()
+2460        TIMESTAMPLTZ = auto()
+2461        DATE = auto()
+2462        DATETIME = auto()
+2463        ARRAY = auto()
+2464        MAP = auto()
+2465        UUID = auto()
+2466        GEOGRAPHY = auto()
+2467        GEOMETRY = auto()
+2468        STRUCT = auto()
+2469        NULLABLE = auto()
+2470        HLLSKETCH = auto()
+2471        HSTORE = auto()
+2472        SUPER = auto()
+2473        SERIAL = auto()
+2474        SMALLSERIAL = auto()
+2475        BIGSERIAL = auto()
+2476        XML = auto()
+2477        UNIQUEIDENTIFIER = auto()
+2478        MONEY = auto()
+2479        SMALLMONEY = auto()
+2480        ROWVERSION = auto()
+2481        IMAGE = auto()
+2482        VARIANT = auto()
+2483        OBJECT = auto()
+2484        NULL = auto()
+2485        UNKNOWN = auto()  # Sentinel value, useful for type annotation
+2486
+2487    TEXT_TYPES = {
+2488        Type.CHAR,
+2489        Type.NCHAR,
+2490        Type.VARCHAR,
+2491        Type.NVARCHAR,
+2492        Type.TEXT,
+2493    }
+2494
+2495    INTEGER_TYPES = {
+2496        Type.INT,
+2497        Type.TINYINT,
+2498        Type.SMALLINT,
+2499        Type.BIGINT,
+2500    }
+2501
+2502    FLOAT_TYPES = {
+2503        Type.FLOAT,
+2504        Type.DOUBLE,
+2505    }
+2506
+2507    NUMERIC_TYPES = {*INTEGER_TYPES, *FLOAT_TYPES}
+2508
+2509    TEMPORAL_TYPES = {
+2510        Type.TIMESTAMP,
+2511        Type.TIMESTAMPTZ,
+2512        Type.TIMESTAMPLTZ,
+2513        Type.DATE,
+2514        Type.DATETIME,
+2515    }
+2516
+2517    @classmethod
+2518    def build(
+2519        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
+2520    ) -> DataType:
+2521        from sqlglot import parse_one
+2522
+2523        if isinstance(dtype, str):
+2524            if dtype.upper() in cls.Type.__members__:
+2525                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()])
+2526            else:
+2527                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
+2528            if data_type_exp is None:
+2529                raise ValueError(f"Unparsable data type value: {dtype}")
+2530        elif isinstance(dtype, DataType.Type):
+2531            data_type_exp = DataType(this=dtype)
+2532        elif isinstance(dtype, DataType):
+2533            return dtype
+2534        else:
+2535            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
+2536        return DataType(**{**data_type_exp.args, **kwargs})
+2537
+2538    def is_type(self, dtype: DataType.Type) -> bool:
+2539        return self.this == dtype
+
+ + + + +
+ +
+
@classmethod
+ + def + build( cls, dtype: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, **kwargs) -> sqlglot.expressions.DataType: + + + +
+ +
2517    @classmethod
+2518    def build(
+2519        cls, dtype: str | DataType | DataType.Type, dialect: DialectType = None, **kwargs
+2520    ) -> DataType:
+2521        from sqlglot import parse_one
+2522
+2523        if isinstance(dtype, str):
+2524            if dtype.upper() in cls.Type.__members__:
+2525                data_type_exp: t.Optional[Expression] = DataType(this=DataType.Type[dtype.upper()])
+2526            else:
+2527                data_type_exp = parse_one(dtype, read=dialect, into=DataType)
+2528            if data_type_exp is None:
+2529                raise ValueError(f"Unparsable data type value: {dtype}")
+2530        elif isinstance(dtype, DataType.Type):
+2531            data_type_exp = DataType(this=dtype)
+2532        elif isinstance(dtype, DataType):
+2533            return dtype
+2534        else:
+2535            raise ValueError(f"Invalid data type: {type(dtype)}. Expected str or DataType.Type")
+2536        return DataType(**{**data_type_exp.args, **kwargs})
+
+ + + + +
+
+ +
+ + def + is_type(self, dtype: sqlglot.expressions.DataType.Type) -> bool: + + + +
+ +
2538    def is_type(self, dtype: DataType.Type) -> bool:
+2539        return self.this == dtype
+
+ + + + +
+ +
+
+ +
+ + class + DataType.Type(sqlglot.helper.AutoName): + + + +
+ +
2434    class Type(AutoName):
+2435        CHAR = auto()
+2436        NCHAR = auto()
+2437        VARCHAR = auto()
+2438        NVARCHAR = auto()
+2439        TEXT = auto()
+2440        MEDIUMTEXT = auto()
+2441        LONGTEXT = auto()
+2442        MEDIUMBLOB = auto()
+2443        LONGBLOB = auto()
+2444        BINARY = auto()
+2445        VARBINARY = auto()
+2446        INT = auto()
+2447        TINYINT = auto()
+2448        SMALLINT = auto()
+2449        BIGINT = auto()
+2450        FLOAT = auto()
+2451        DOUBLE = auto()
+2452        DECIMAL = auto()
+2453        BOOLEAN = auto()
+2454        JSON = auto()
+2455        JSONB = auto()
+2456        INTERVAL = auto()
+2457        TIME = auto()
+2458        TIMESTAMP = auto()
+2459        TIMESTAMPTZ = auto()
+2460        TIMESTAMPLTZ = auto()
+2461        DATE = auto()
+2462        DATETIME = auto()
+2463        ARRAY = auto()
+2464        MAP = auto()
+2465        UUID = auto()
+2466        GEOGRAPHY = auto()
+2467        GEOMETRY = auto()
+2468        STRUCT = auto()
+2469        NULLABLE = auto()
+2470        HLLSKETCH = auto()
+2471        HSTORE = auto()
+2472        SUPER = auto()
+2473        SERIAL = auto()
+2474        SMALLSERIAL = auto()
+2475        BIGSERIAL = auto()
+2476        XML = auto()
+2477        UNIQUEIDENTIFIER = auto()
+2478        MONEY = auto()
+2479        SMALLMONEY = auto()
+2480        ROWVERSION = auto()
+2481        IMAGE = auto()
+2482        VARIANT = auto()
+2483        OBJECT = auto()
+2484        NULL = auto()
+2485        UNKNOWN = auto()  # Sentinel value, useful for type annotation
+
+ + +

An enumeration.

+
+ + +
+
+ CHAR = <Type.CHAR: 'CHAR'> + + +
+ + + + +
+
+
+ NCHAR = <Type.NCHAR: 'NCHAR'> + + +
+ + + + +
+
+
+ VARCHAR = <Type.VARCHAR: 'VARCHAR'> + + +
+ + + + +
+
+
+ NVARCHAR = <Type.NVARCHAR: 'NVARCHAR'> + + +
+ + + + +
+
+
+ TEXT = <Type.TEXT: 'TEXT'> + + +
+ + + + +
+
+
+ MEDIUMTEXT = <Type.MEDIUMTEXT: 'MEDIUMTEXT'> + + +
+ + + + +
+
+
+ LONGTEXT = <Type.LONGTEXT: 'LONGTEXT'> + + +
+ + + + +
+
+
+ MEDIUMBLOB = <Type.MEDIUMBLOB: 'MEDIUMBLOB'> + + +
+ + + + +
+
+
+ LONGBLOB = <Type.LONGBLOB: 'LONGBLOB'> + + +
+ + + + +
+
+
+ BINARY = <Type.BINARY: 'BINARY'> + + +
+ + + + +
+
+
+ VARBINARY = <Type.VARBINARY: 'VARBINARY'> + + +
+ + + + +
+
+
+ INT = <Type.INT: 'INT'> + + +
+ + + + +
+
+
+ TINYINT = <Type.TINYINT: 'TINYINT'> + + +
+ + + + +
+
+
+ SMALLINT = <Type.SMALLINT: 'SMALLINT'> + + +
+ + + + +
+
+
+ BIGINT = <Type.BIGINT: 'BIGINT'> + + +
+ + + + +
+
+
+ FLOAT = <Type.FLOAT: 'FLOAT'> + + +
+ + + + +
+
+
+ DOUBLE = <Type.DOUBLE: 'DOUBLE'> + + +
+ + + + +
+
+
+ DECIMAL = <Type.DECIMAL: 'DECIMAL'> + + +
+ + + + +
+
+
+ BOOLEAN = <Type.BOOLEAN: 'BOOLEAN'> + + +
+ + + + +
+
+
+ JSON = <Type.JSON: 'JSON'> + + +
+ + + + +
+
+
+ JSONB = <Type.JSONB: 'JSONB'> + + +
+ + + + +
+
+
+ INTERVAL = <Type.INTERVAL: 'INTERVAL'> + + +
+ + + + +
+
+
+ TIME = <Type.TIME: 'TIME'> + + +
+ + + + +
+
+
+ TIMESTAMP = <Type.TIMESTAMP: 'TIMESTAMP'> + + +
+ + + + +
+
+
+ TIMESTAMPTZ = <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'> + + +
+ + + + +
+
+
+ TIMESTAMPLTZ = <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'> + + +
+ + + + +
+
+
+ DATE = <Type.DATE: 'DATE'> + + +
+ + + + +
+
+
+ DATETIME = <Type.DATETIME: 'DATETIME'> + + +
+ + + + +
+
+
+ ARRAY = <Type.ARRAY: 'ARRAY'> + + +
+ + + + +
+
+
+ MAP = <Type.MAP: 'MAP'> + + +
+ + + + +
+
+
+ UUID = <Type.UUID: 'UUID'> + + +
+ + + + +
+
+
+ GEOGRAPHY = <Type.GEOGRAPHY: 'GEOGRAPHY'> + + +
+ + + + +
+
+
+ GEOMETRY = <Type.GEOMETRY: 'GEOMETRY'> + + +
+ + + + +
+
+
+ STRUCT = <Type.STRUCT: 'STRUCT'> + + +
+ + + + +
+
+
+ NULLABLE = <Type.NULLABLE: 'NULLABLE'> + + +
+ + + + +
+
+
+ HLLSKETCH = <Type.HLLSKETCH: 'HLLSKETCH'> + + +
+ + + + +
+
+
+ HSTORE = <Type.HSTORE: 'HSTORE'> + + +
+ + + + +
+
+
+ SUPER = <Type.SUPER: 'SUPER'> + + +
+ + + + +
+
+
+ SERIAL = <Type.SERIAL: 'SERIAL'> + + +
+ + + + +
+
+
+ SMALLSERIAL = <Type.SMALLSERIAL: 'SMALLSERIAL'> + + +
+ + + + +
+
+
+ BIGSERIAL = <Type.BIGSERIAL: 'BIGSERIAL'> + + +
+ + + + +
+
+
+ XML = <Type.XML: 'XML'> + + +
+ + + + +
+
+
+ UNIQUEIDENTIFIER = <Type.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'> + + +
+ + + + +
+
+
+ MONEY = <Type.MONEY: 'MONEY'> + + +
+ + + + +
+
+
+ SMALLMONEY = <Type.SMALLMONEY: 'SMALLMONEY'> + + +
+ + + + +
+
+
+ ROWVERSION = <Type.ROWVERSION: 'ROWVERSION'> + + +
+ + + + +
+
+
+ IMAGE = <Type.IMAGE: 'IMAGE'> + + +
+ + + + +
+
+
+ VARIANT = <Type.VARIANT: 'VARIANT'> + + +
+ + + + +
+
+
+ OBJECT = <Type.OBJECT: 'OBJECT'> + + +
+ + + + +
+
+
+ NULL = <Type.NULL: 'NULL'> + + +
+ + + + +
+
+
+ UNKNOWN = <Type.UNKNOWN: 'UNKNOWN'> + + +
+ + + + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + class + PseudoType(Expression): + + + +
+ +
2543class PseudoType(Expression):
+2544    pass
+
+ + + + + +
+
+ +
+ + class + StructKwarg(Expression): + + + +
+ +
2547class StructKwarg(Expression):
+2548    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + SubqueryPredicate(Predicate): + + + +
+ +
2552class SubqueryPredicate(Predicate):
+2553    pass
+
+ + + + + +
+
+ +
+ + class + All(SubqueryPredicate): + + + +
+ +
2556class All(SubqueryPredicate):
+2557    pass
+
+ + + + + +
+
+ +
+ + class + Any(SubqueryPredicate): + + + +
+ +
2560class Any(SubqueryPredicate):
+2561    pass
+
+ + + + + +
+
+ +
+ + class + Exists(SubqueryPredicate): + + + +
+ +
2564class Exists(SubqueryPredicate):
+2565    pass
+
+ + + + + +
+
+ +
+ + class + Command(Expression): + + + +
+ +
2570class Command(Expression):
+2571    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + Transaction(Expression): + + + +
+ +
2574class Transaction(Expression):
+2575    arg_types = {"this": False, "modes": False}
+
+ + + + + +
+
+ +
+ + class + Commit(Expression): + + + +
+ +
2578class Commit(Expression):
+2579    arg_types = {"chain": False}
+
+ + + + + +
+
+ +
+ + class + Rollback(Expression): + + + +
+ +
2582class Rollback(Expression):
+2583    arg_types = {"savepoint": False}
+
+ + + + + +
+
+ +
+ + class + AlterTable(Expression): + + + +
+ +
2586class AlterTable(Expression):
+2587    arg_types = {"this": True, "actions": True, "exists": False}
+
+ + + + + +
+
+ +
+ + class + AddConstraint(Expression): + + + +
+ +
2590class AddConstraint(Expression):
+2591    arg_types = {"this": False, "expression": False, "enforced": False}
+
+ + + + + +
+
+ +
+ + class + DropPartition(Expression): + + + +
+ +
2594class DropPartition(Expression):
+2595    arg_types = {"expressions": True, "exists": False}
+
+ + + + + +
+
+ +
+ + class + Binary(Expression): + + + +
+ +
2599class Binary(Expression):
+2600    arg_types = {"this": True, "expression": True}
+2601
+2602    @property
+2603    def left(self):
+2604        return self.this
+2605
+2606    @property
+2607    def right(self):
+2608        return self.expression
+
+ + + + + +
+
+ +
+ + class + Add(Binary): + + + +
+ +
2611class Add(Binary):
+2612    pass
+
+ + + + + +
+
+ +
+ + class + Connector(Binary, Condition): + + + +
+ +
2615class Connector(Binary, Condition):
+2616    pass
+
+ + + + + +
+
+ +
+ + class + And(Connector): + + + +
+ +
2619class And(Connector):
+2620    pass
+
+ + + + + +
+
+ +
+ + class + Or(Connector): + + + +
+ +
2623class Or(Connector):
+2624    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseAnd(Binary): + + + +
+ +
2627class BitwiseAnd(Binary):
+2628    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseLeftShift(Binary): + + + +
+ +
2631class BitwiseLeftShift(Binary):
+2632    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseOr(Binary): + + + +
+ +
2635class BitwiseOr(Binary):
+2636    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseRightShift(Binary): + + + +
+ +
2639class BitwiseRightShift(Binary):
+2640    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseXor(Binary): + + + +
+ +
2643class BitwiseXor(Binary):
+2644    pass
+
+ + + + + +
+
+ +
+ + class + Div(Binary): + + + +
+ +
2647class Div(Binary):
+2648    pass
+
+ + + + + +
+
+ +
+ + class + Dot(Binary): + + + +
+ +
2651class Dot(Binary):
+2652    @property
+2653    def name(self) -> str:
+2654        return self.expression.name
+
+ + + + + +
+
+ +
+ + class + DPipe(Binary): + + + +
+ +
2657class DPipe(Binary):
+2658    pass
+
+ + + + + +
+
+ +
+ + class + EQ(Binary, Predicate): + + + +
+ +
2661class EQ(Binary, Predicate):
+2662    pass
+
+ + + + + +
+
+ +
+ + class + NullSafeEQ(Binary, Predicate): + + + +
+ +
2665class NullSafeEQ(Binary, Predicate):
+2666    pass
+
+ + + + + +
+
+ +
+ + class + NullSafeNEQ(Binary, Predicate): + + + +
+ +
2669class NullSafeNEQ(Binary, Predicate):
+2670    pass
+
+ + + + + +
+
+ +
+ + class + Distance(Binary): + + + +
+ +
2673class Distance(Binary):
+2674    pass
+
+ + + + + +
+
+ +
+ + class + Escape(Binary): + + + +
+ +
2677class Escape(Binary):
+2678    pass
+
+ + + + + +
+
+ +
+ + class + Glob(Binary, Predicate): + + + +
+ +
2681class Glob(Binary, Predicate):
+2682    pass
+
+ + + + + +
+
+ +
+ + class + GT(Binary, Predicate): + + + +
+ +
2685class GT(Binary, Predicate):
+2686    pass
+
+ + + + + +
+
+ +
+ + class + GTE(Binary, Predicate): + + + +
+ +
2689class GTE(Binary, Predicate):
+2690    pass
+
+ + + + + +
+
+ +
+ + class + ILike(Binary, Predicate): + + + +
+ +
2693class ILike(Binary, Predicate):
+2694    pass
+
+ + + + + +
+
+ +
+ + class + IntDiv(Binary): + + + +
+ +
2697class IntDiv(Binary):
+2698    pass
+
+ + + + + +
+
+ +
+ + class + Is(Binary, Predicate): + + + +
+ +
2701class Is(Binary, Predicate):
+2702    pass
+
+ + + + + +
+
+ +
+ + class + Kwarg(Binary): + + + +
+ +
2705class Kwarg(Binary):
+2706    """Kwarg in special functions like func(kwarg => y)."""
+
+ + +

Kwarg in special functions like func(kwarg => y).

+
+ + + +
+
+ +
+ + class + Like(Binary, Predicate): + + + +
+ +
2709class Like(Binary, Predicate):
+2710    pass
+
+ + + + + +
+
+ +
+ + class + LT(Binary, Predicate): + + + +
+ +
2713class LT(Binary, Predicate):
+2714    pass
+
+ + + + + +
+
+ +
+ + class + LTE(Binary, Predicate): + + + +
+ +
2717class LTE(Binary, Predicate):
+2718    pass
+
+ + + + + +
+
+ +
+ + class + Mod(Binary): + + + +
+ +
2721class Mod(Binary):
+2722    pass
+
+ + + + + +
+
+ +
+ + class + Mul(Binary): + + + +
+ +
2725class Mul(Binary):
+2726    pass
+
+ + + + + +
+
+ +
+ + class + NEQ(Binary, Predicate): + + + +
+ +
2729class NEQ(Binary, Predicate):
+2730    pass
+
+ + + + + +
+
+ +
+ + class + SimilarTo(Binary, Predicate): + + + +
+ +
2733class SimilarTo(Binary, Predicate):
+2734    pass
+
+ + + + + +
+
+ +
+ + class + Slice(Binary): + + + +
+ +
2737class Slice(Binary):
+2738    arg_types = {"this": False, "expression": False}
+
+ + + + + +
+
+ +
+ + class + Sub(Binary): + + + +
+ +
2741class Sub(Binary):
+2742    pass
+
+ + + + + +
+
+ +
+ + class + Unary(Expression): + + + +
+ +
2747class Unary(Expression):
+2748    pass
+
+ + + + + +
+
+ +
+ + class + BitwiseNot(Unary): + + + +
+ +
2751class BitwiseNot(Unary):
+2752    pass
+
+ + + + + +
+
+ +
+ + class + Not(Unary, Condition): + + + +
+ +
2755class Not(Unary, Condition):
+2756    pass
+
+ + + + + +
+
+ +
+ + class + Paren(Unary, Condition): + + + +
+ +
2759class Paren(Unary, Condition):
+2760    arg_types = {"this": True, "with": False}
+
+ + + + + +
+
+ +
+ + class + Neg(Unary): + + + +
+ +
2763class Neg(Unary):
+2764    pass
+
+ + + + + +
+
+ +
+ + class + Alias(Expression): + + + +
+ +
2768class Alias(Expression):
+2769    arg_types = {"this": True, "alias": False}
+2770
+2771    @property
+2772    def output_name(self):
+2773        return self.alias
+
+ + + + +
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+ +
+
+ +
+ + class + Aliases(Expression): + + + +
+ +
2776class Aliases(Expression):
+2777    arg_types = {"this": True, "expressions": True}
+2778
+2779    @property
+2780    def aliases(self):
+2781        return self.expressions
+
+ + + + + +
+
+ +
+ + class + AtTimeZone(Expression): + + + +
+ +
2784class AtTimeZone(Expression):
+2785    arg_types = {"this": True, "zone": True}
+
+ + + + + +
+
+ +
+ + class + Between(Predicate): + + + +
+ +
2788class Between(Predicate):
+2789    arg_types = {"this": True, "low": True, "high": True}
+
+ + + + + +
+
+ +
+ + class + Bracket(Condition): + + + +
+ +
2792class Bracket(Condition):
+2793    arg_types = {"this": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + Distinct(Expression): + + + +
+ +
2796class Distinct(Expression):
+2797    arg_types = {"expressions": False, "on": False}
+
+ + + + + +
+
+ +
+ + class + In(Predicate): + + + +
+ +
2800class In(Predicate):
+2801    arg_types = {
+2802        "this": True,
+2803        "expressions": False,
+2804        "query": False,
+2805        "unnest": False,
+2806        "field": False,
+2807        "is_global": False,
+2808    }
+
+ + + + + +
+
+ +
+ + class + TimeUnit(Expression): + + + +
+ +
2811class TimeUnit(Expression):
+2812    """Automatically converts unit arg into a var."""
+2813
+2814    arg_types = {"unit": False}
+2815
+2816    def __init__(self, **args):
+2817        unit = args.get("unit")
+2818        if isinstance(unit, Column):
+2819            args["unit"] = Var(this=unit.name)
+2820        elif isinstance(unit, Week):
+2821            unit.set("this", Var(this=unit.this.name))
+2822        super().__init__(**args)
+
+ + +

Automatically converts unit arg into a var.

+
+ + +
+ +
+ + TimeUnit(**args) + + + +
+ +
2816    def __init__(self, **args):
+2817        unit = args.get("unit")
+2818        if isinstance(unit, Column):
+2819            args["unit"] = Var(this=unit.name)
+2820        elif isinstance(unit, Week):
+2821            unit.set("this", Var(this=unit.this.name))
+2822        super().__init__(**args)
+
+ + + + +
+ +
+
+ +
+ + class + Interval(TimeUnit): + + + +
+ +
2825class Interval(TimeUnit):
+2826    arg_types = {"this": False, "unit": False}
+
+ + + + + +
+
+ +
+ + class + IgnoreNulls(Expression): + + + +
+ +
2829class IgnoreNulls(Expression):
+2830    pass
+
+ + + + + +
+
+ +
+ + class + RespectNulls(Expression): + + + +
+ +
2833class RespectNulls(Expression):
+2834    pass
+
+ + + + + +
+
+ +
+ + class + Func(Condition): + + + +
+ +
2838class Func(Condition):
+2839    """
+2840    The base class for all function expressions.
+2841
+2842    Attributes:
+2843        is_var_len_args (bool): if set to True the last argument defined in arg_types will be
+2844            treated as a variable length argument and the argument's value will be stored as a list.
+2845        _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items)
+2846            for this function expression. These values are used to map this node to a name during parsing
+2847            as well as to provide the function's name during SQL string generation. By default the SQL
+2848            name is set to the expression's class name transformed to snake case.
+2849    """
+2850
+2851    is_var_len_args = False
+2852
+2853    @classmethod
+2854    def from_arg_list(cls, args):
+2855        if cls.is_var_len_args:
+2856            all_arg_keys = list(cls.arg_types)
+2857            # If this function supports variable length argument treat the last argument as such.
+2858            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
+2859            num_non_var = len(non_var_len_arg_keys)
+2860
+2861            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
+2862            args_dict[all_arg_keys[-1]] = args[num_non_var:]
+2863        else:
+2864            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
+2865
+2866        return cls(**args_dict)
+2867
+2868    @classmethod
+2869    def sql_names(cls):
+2870        if cls is Func:
+2871            raise NotImplementedError(
+2872                "SQL name is only supported by concrete function implementations"
+2873            )
+2874        if "_sql_names" not in cls.__dict__:
+2875            cls._sql_names = [camel_to_snake_case(cls.__name__)]
+2876        return cls._sql_names
+2877
+2878    @classmethod
+2879    def sql_name(cls):
+2880        return cls.sql_names()[0]
+2881
+2882    @classmethod
+2883    def default_parser_mappings(cls):
+2884        return {name: cls.from_arg_list for name in cls.sql_names()}
+
+ + +

The base class for all function expressions.

+ +
Attributes:
+ +
    +
  • is_var_len_args (bool): if set to True the last argument defined in arg_types will be +treated as a variable length argument and the argument's value will be stored as a list.
  • +
  • _sql_names (list): determines the SQL name (1st item in the list) and aliases (subsequent items) +for this function expression. These values are used to map this node to a name during parsing +as well as to provide the function's name during SQL string generation. By default the SQL +name is set to the expression's class name transformed to snake case.
  • +
+
+ + +
+ +
+
@classmethod
+ + def + from_arg_list(cls, args): + + + +
+ +
2853    @classmethod
+2854    def from_arg_list(cls, args):
+2855        if cls.is_var_len_args:
+2856            all_arg_keys = list(cls.arg_types)
+2857            # If this function supports variable length argument treat the last argument as such.
+2858            non_var_len_arg_keys = all_arg_keys[:-1] if cls.is_var_len_args else all_arg_keys
+2859            num_non_var = len(non_var_len_arg_keys)
+2860
+2861            args_dict = {arg_key: arg for arg, arg_key in zip(args, non_var_len_arg_keys)}
+2862            args_dict[all_arg_keys[-1]] = args[num_non_var:]
+2863        else:
+2864            args_dict = {arg_key: arg for arg, arg_key in zip(args, cls.arg_types)}
+2865
+2866        return cls(**args_dict)
+
+ + + + +
+
+ +
+
@classmethod
+ + def + sql_names(cls): + + + +
+ +
2868    @classmethod
+2869    def sql_names(cls):
+2870        if cls is Func:
+2871            raise NotImplementedError(
+2872                "SQL name is only supported by concrete function implementations"
+2873            )
+2874        if "_sql_names" not in cls.__dict__:
+2875            cls._sql_names = [camel_to_snake_case(cls.__name__)]
+2876        return cls._sql_names
+
+ + + + +
+
+ +
+
@classmethod
+ + def + sql_name(cls): + + + +
+ +
2878    @classmethod
+2879    def sql_name(cls):
+2880        return cls.sql_names()[0]
+
+ + + + +
+
+ +
+
@classmethod
+ + def + default_parser_mappings(cls): + + + +
+ +
2882    @classmethod
+2883    def default_parser_mappings(cls):
+2884        return {name: cls.from_arg_list for name in cls.sql_names()}
+
+ + + + +
+ +
+
+ +
+ + class + AggFunc(Func): + + + +
+ +
2887class AggFunc(Func):
+2888    pass
+
+ + + + + +
+
+ +
+ + class + Abs(Func): + + + +
+ +
2891class Abs(Func):
+2892    pass
+
+ + + + + +
+
+ +
+ + class + Anonymous(Func): + + + +
+ +
2895class Anonymous(Func):
+2896    arg_types = {"this": True, "expressions": False}
+2897    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + ApproxDistinct(AggFunc): + + + +
+ +
2900class ApproxDistinct(AggFunc):
+2901    arg_types = {"this": True, "accuracy": False}
+
+ + + + + +
+
+ +
+ + class + Array(Func): + + + +
+ +
2904class Array(Func):
+2905    arg_types = {"expressions": False}
+2906    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + GenerateSeries(Func): + + + +
+ +
2909class GenerateSeries(Func):
+2910    arg_types = {"start": True, "end": True, "step": False}
+
+ + + + + +
+
+ +
+ + class + ArrayAgg(AggFunc): + + + +
+ +
2913class ArrayAgg(AggFunc):
+2914    pass
+
+ + + + + +
+
+ +
+ + class + ArrayAll(Func): + + + +
+ +
2917class ArrayAll(Func):
+2918    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + ArrayAny(Func): + + + +
+ +
2921class ArrayAny(Func):
+2922    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + ArrayConcat(Func): + + + +
+ +
2925class ArrayConcat(Func):
+2926    arg_types = {"this": True, "expressions": False}
+2927    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + ArrayContains(Func): + + + +
+ +
2930class ArrayContains(Func):
+2931    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + ArrayFilter(Func): + + + +
+ +
2934class ArrayFilter(Func):
+2935    arg_types = {"this": True, "expression": True}
+2936    _sql_names = ["FILTER", "ARRAY_FILTER"]
+
+ + + + + +
+
+ +
+ + class + ArraySize(Func): + + + +
+ +
2939class ArraySize(Func):
+2940    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + ArraySort(Func): + + + +
+ +
2943class ArraySort(Func):
+2944    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + ArraySum(Func): + + + +
+ +
2947class ArraySum(Func):
+2948    pass
+
+ + + + + +
+
+ +
+ + class + ArrayUnionAgg(AggFunc): + + + +
+ +
2951class ArrayUnionAgg(AggFunc):
+2952    pass
+
+ + + + + +
+
+ +
+ + class + Avg(AggFunc): + + + +
+ +
2955class Avg(AggFunc):
+2956    pass
+
+ + + + + +
+
+ +
+ + class + AnyValue(AggFunc): + + + +
+ +
2959class AnyValue(AggFunc):
+2960    pass
+
+ + + + + +
+
+ +
+ + class + Case(Func): + + + +
+ +
2963class Case(Func):
+2964    arg_types = {"this": False, "ifs": True, "default": False}
+
+ + + + + +
+
+ +
+ + class + Cast(Func): + + + +
+ +
2967class Cast(Func):
+2968    arg_types = {"this": True, "to": True}
+2969
+2970    @property
+2971    def name(self) -> str:
+2972        return self.this.name
+2973
+2974    @property
+2975    def to(self):
+2976        return self.args["to"]
+2977
+2978    @property
+2979    def output_name(self):
+2980        return self.name
+2981
+2982    def is_type(self, dtype: DataType.Type) -> bool:
+2983        return self.to.is_type(dtype)
+
+ + + + +
+
+ output_name + + +
+ + +

Name of the output column if this expression is a selection.

+ +

If the Expression has no output name, an empty string is returned.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> parse_one("SELECT a").expressions[0].output_name
+'a'
+>>> parse_one("SELECT b AS c").expressions[0].output_name
+'c'
+>>> parse_one("SELECT 1 + 2").expressions[0].output_name
+''
+
+
+
+
+ + +
+
+ +
+ + def + is_type(self, dtype: sqlglot.expressions.DataType.Type) -> bool: + + + +
+ +
2982    def is_type(self, dtype: DataType.Type) -> bool:
+2983        return self.to.is_type(dtype)
+
+ + + + +
+ +
+
+ +
+ + class + Collate(Binary): + + + +
+ +
2986class Collate(Binary):
+2987    pass
+
+ + + + + +
+
+ +
+ + class + TryCast(Cast): + + + +
+ +
2990class TryCast(Cast):
+2991    pass
+
+ + + + + +
+
+ +
+ + class + Ceil(Func): + + + +
+ +
2994class Ceil(Func):
+2995    arg_types = {"this": True, "decimals": False}
+2996    _sql_names = ["CEIL", "CEILING"]
+
+ + + + + +
+
+ +
+ + class + Coalesce(Func): + + + +
+ +
2999class Coalesce(Func):
+3000    arg_types = {"this": True, "expressions": False}
+3001    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + Concat(Func): + + + +
+ +
3004class Concat(Func):
+3005    arg_types = {"expressions": True}
+3006    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + ConcatWs(Concat): + + + +
+ +
3009class ConcatWs(Concat):
+3010    _sql_names = ["CONCAT_WS"]
+
+ + + + + +
+
+ +
+ + class + Count(AggFunc): + + + +
+ +
3013class Count(AggFunc):
+3014    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + CurrentDate(Func): + + + +
+ +
3017class CurrentDate(Func):
+3018    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + CurrentDatetime(Func): + + + +
+ +
3021class CurrentDatetime(Func):
+3022    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + CurrentTime(Func): + + + +
+ +
3025class CurrentTime(Func):
+3026    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + CurrentTimestamp(Func): + + + +
+ +
3029class CurrentTimestamp(Func):
+3030    arg_types = {"this": False}
+
+ + + + + +
+
+ +
+ + class + DateAdd(Func, TimeUnit): + + + +
+ +
3033class DateAdd(Func, TimeUnit):
+3034    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DateSub(Func, TimeUnit): + + + +
+ +
3037class DateSub(Func, TimeUnit):
+3038    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DateDiff(Func, TimeUnit): + + + +
+ +
3041class DateDiff(Func, TimeUnit):
+3042    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DateTrunc(Func): + + + +
+ +
3045class DateTrunc(Func):
+3046    arg_types = {"this": True, "expression": True, "zone": False}
+
+ + + + + +
+
+ +
+ + class + DatetimeAdd(Func, TimeUnit): + + + +
+ +
3049class DatetimeAdd(Func, TimeUnit):
+3050    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DatetimeSub(Func, TimeUnit): + + + +
+ +
3053class DatetimeSub(Func, TimeUnit):
+3054    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DatetimeDiff(Func, TimeUnit): + + + +
+ +
3057class DatetimeDiff(Func, TimeUnit):
+3058    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + DatetimeTrunc(Func, TimeUnit): + + + +
+ +
3061class DatetimeTrunc(Func, TimeUnit):
+3062    arg_types = {"this": True, "unit": True, "zone": False}
+
+ + + + + +
+
+ +
+ + class + DayOfWeek(Func): + + + +
+ +
3065class DayOfWeek(Func):
+3066    _sql_names = ["DAY_OF_WEEK", "DAYOFWEEK"]
+
+ + + + + +
+
+ +
+ + class + DayOfMonth(Func): + + + +
+ +
3069class DayOfMonth(Func):
+3070    _sql_names = ["DAY_OF_MONTH", "DAYOFMONTH"]
+
+ + + + + +
+
+ +
+ + class + DayOfYear(Func): + + + +
+ +
3073class DayOfYear(Func):
+3074    _sql_names = ["DAY_OF_YEAR", "DAYOFYEAR"]
+
+ + + + + +
+
+ +
+ + class + WeekOfYear(Func): + + + +
+ +
3077class WeekOfYear(Func):
+3078    _sql_names = ["WEEK_OF_YEAR", "WEEKOFYEAR"]
+
+ + + + + +
+
+ +
+ + class + LastDateOfMonth(Func): + + + +
+ +
3081class LastDateOfMonth(Func):
+3082    pass
+
+ + + + + +
+
+ +
+ + class + Extract(Func): + + + +
+ +
3085class Extract(Func):
+3086    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + TimestampAdd(Func, TimeUnit): + + + +
+ +
3089class TimestampAdd(Func, TimeUnit):
+3090    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimestampSub(Func, TimeUnit): + + + +
+ +
3093class TimestampSub(Func, TimeUnit):
+3094    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimestampDiff(Func, TimeUnit): + + + +
+ +
3097class TimestampDiff(Func, TimeUnit):
+3098    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimestampTrunc(Func, TimeUnit): + + + +
+ +
3101class TimestampTrunc(Func, TimeUnit):
+3102    arg_types = {"this": True, "unit": True, "zone": False}
+
+ + + + + +
+
+ +
+ + class + TimeAdd(Func, TimeUnit): + + + +
+ +
3105class TimeAdd(Func, TimeUnit):
+3106    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimeSub(Func, TimeUnit): + + + +
+ +
3109class TimeSub(Func, TimeUnit):
+3110    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimeDiff(Func, TimeUnit): + + + +
+ +
3113class TimeDiff(Func, TimeUnit):
+3114    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TimeTrunc(Func, TimeUnit): + + + +
+ +
3117class TimeTrunc(Func, TimeUnit):
+3118    arg_types = {"this": True, "unit": True, "zone": False}
+
+ + + + + +
+
+ +
+ + class + DateFromParts(Func): + + + +
+ +
3121class DateFromParts(Func):
+3122    _sql_names = ["DATEFROMPARTS"]
+3123    arg_types = {"year": True, "month": True, "day": True}
+
+ + + + + +
+
+ +
+ + class + DateStrToDate(Func): + + + +
+ +
3126class DateStrToDate(Func):
+3127    pass
+
+ + + + + +
+
+ +
+ + class + DateToDateStr(Func): + + + +
+ +
3130class DateToDateStr(Func):
+3131    pass
+
+ + + + + +
+
+ +
+ + class + DateToDi(Func): + + + +
+ +
3134class DateToDi(Func):
+3135    pass
+
+ + + + + +
+
+ +
+ + class + Day(Func): + + + +
+ +
3138class Day(Func):
+3139    pass
+
+ + + + + +
+
+ +
+ + class + Decode(Func): + + + +
+ +
3142class Decode(Func):
+3143    arg_types = {"this": True, "charset": True, "replace": False}
+
+ + + + + +
+
+ +
+ + class + DiToDate(Func): + + + +
+ +
3146class DiToDate(Func):
+3147    pass
+
+ + + + + +
+
+ +
+ + class + Encode(Func): + + + +
+ +
3150class Encode(Func):
+3151    arg_types = {"this": True, "charset": True}
+
+ + + + + +
+
+ +
+ + class + Exp(Func): + + + +
+ +
3154class Exp(Func):
+3155    pass
+
+ + + + + +
+
+ +
+ + class + Explode(Func): + + + +
+ +
3158class Explode(Func):
+3159    pass
+
+ + + + + +
+
+ +
+ + class + Floor(Func): + + + +
+ +
3162class Floor(Func):
+3163    arg_types = {"this": True, "decimals": False}
+
+ + + + + +
+
+ +
+ + class + Greatest(Func): + + + +
+ +
3166class Greatest(Func):
+3167    arg_types = {"this": True, "expressions": False}
+3168    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + GroupConcat(Func): + + + +
+ +
3171class GroupConcat(Func):
+3172    arg_types = {"this": True, "separator": False}
+
+ + + + + +
+
+ +
+ + class + Hex(Func): + + + +
+ +
3175class Hex(Func):
+3176    pass
+
+ + + + + +
+
+ +
+ + class + If(Func): + + + +
+ +
3179class If(Func):
+3180    arg_types = {"this": True, "true": True, "false": False}
+
+ + + + + +
+
+ +
+ + class + IfNull(Func): + + + +
+ +
3183class IfNull(Func):
+3184    arg_types = {"this": True, "expression": False}
+3185    _sql_names = ["IFNULL", "NVL"]
+
+ + + + + +
+
+ +
+ + class + Initcap(Func): + + + +
+ +
3188class Initcap(Func):
+3189    pass
+
+ + + + + +
+
+ +
+ + class + JSONBContains(Binary): + + + +
+ +
3192class JSONBContains(Binary):
+3193    _sql_names = ["JSONB_CONTAINS"]
+
+ + + + + +
+
+ +
+ + class + JSONExtract(Binary, Func): + + + +
+ +
3196class JSONExtract(Binary, Func):
+3197    _sql_names = ["JSON_EXTRACT"]
+
+ + + + + +
+
+ +
+ + class + JSONExtractScalar(JSONExtract): + + + +
+ +
3200class JSONExtractScalar(JSONExtract):
+3201    _sql_names = ["JSON_EXTRACT_SCALAR"]
+
+ + + + + +
+
+ +
+ + class + JSONBExtract(JSONExtract): + + + +
+ +
3204class JSONBExtract(JSONExtract):
+3205    _sql_names = ["JSONB_EXTRACT"]
+
+ + + + + +
+
+ +
+ + class + JSONBExtractScalar(JSONExtract): + + + +
+ +
3208class JSONBExtractScalar(JSONExtract):
+3209    _sql_names = ["JSONB_EXTRACT_SCALAR"]
+
+ + + + + +
+
+ +
+ + class + Least(Func): + + + +
+ +
3212class Least(Func):
+3213    arg_types = {"this": True, "expressions": False}
+3214    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + Length(Func): + + + +
+ +
3217class Length(Func):
+3218    pass
+
+ + + + + +
+
+ +
+ + class + Levenshtein(Func): + + + +
+ +
3221class Levenshtein(Func):
+3222    arg_types = {
+3223        "this": True,
+3224        "expression": False,
+3225        "ins_cost": False,
+3226        "del_cost": False,
+3227        "sub_cost": False,
+3228    }
+
+ + + + + +
+
+ +
+ + class + Ln(Func): + + + +
+ +
3231class Ln(Func):
+3232    pass
+
+ + + + + +
+
+ +
+ + class + Log(Func): + + + +
+ +
3235class Log(Func):
+3236    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + Log2(Func): + + + +
+ +
3239class Log2(Func):
+3240    pass
+
+ + + + + +
+
+ +
+ + class + Log10(Func): + + + +
+ +
3243class Log10(Func):
+3244    pass
+
+ + + + + +
+
+ +
+ + class + LogicalOr(AggFunc): + + + +
+ +
3247class LogicalOr(AggFunc):
+3248    _sql_names = ["LOGICAL_OR", "BOOL_OR"]
+
+ + + + + +
+
+ +
+ + class + Lower(Func): + + + +
+ +
3251class Lower(Func):
+3252    _sql_names = ["LOWER", "LCASE"]
+
+ + + + + +
+
+ +
+ + class + Map(Func): + + + +
+ +
3255class Map(Func):
+3256    arg_types = {"keys": False, "values": False}
+
+ + + + + +
+
+ +
+ + class + VarMap(Func): + + + +
+ +
3259class VarMap(Func):
+3260    arg_types = {"keys": True, "values": True}
+3261    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + Matches(Func): + + + +
+ +
3264class Matches(Func):
+3265    """Oracle/Snowflake decode.
+3266    https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm
+3267    Pattern matching MATCHES(value, search1, result1, ...searchN, resultN, else)
+3268    """
+3269
+3270    arg_types = {"this": True, "expressions": True}
+3271    is_var_len_args = True
+
+ + +

Oracle/Snowflake decode. +https://docs.oracle.com/cd/B19306_01/server.102/b14200/functions040.htm +Pattern matching MATCHES(value, search1, result1, ...searchN, resultN, else)

+
+ + + +
+
+ +
+ + class + Max(AggFunc): + + + +
+ +
3274class Max(AggFunc):
+3275    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + Min(AggFunc): + + + +
+ +
3278class Min(AggFunc):
+3279    arg_types = {"this": True, "expression": False}
+
+ + + + + +
+
+ +
+ + class + Month(Func): + + + +
+ +
3282class Month(Func):
+3283    pass
+
+ + + + + +
+
+ +
+ + class + Nvl2(Func): + + + +
+ +
3286class Nvl2(Func):
+3287    arg_types = {"this": True, "true": True, "false": False}
+
+ + + + + +
+
+ +
+ + class + Posexplode(Func): + + + +
+ +
3290class Posexplode(Func):
+3291    pass
+
+ + + + + +
+
+ +
+ + class + Pow(Binary, Func): + + + +
+ +
3294class Pow(Binary, Func):
+3295    _sql_names = ["POWER", "POW"]
+
+ + + + + +
+
+ +
+ + class + PercentileCont(AggFunc): + + + +
+ +
3298class PercentileCont(AggFunc):
+3299    pass
+
+ + + + + +
+
+ +
+ + class + PercentileDisc(AggFunc): + + + +
+ +
3302class PercentileDisc(AggFunc):
+3303    pass
+
+ + + + + +
+
+ +
+ + class + Quantile(AggFunc): + + + +
+ +
3306class Quantile(AggFunc):
+3307    arg_types = {"this": True, "quantile": True}
+
+ + + + + +
+
+ +
+ + class + Quantiles(AggFunc): + + + +
+ +
3312class Quantiles(AggFunc):
+3313    arg_types = {"parameters": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + QuantileIf(AggFunc): + + + +
+ +
3316class QuantileIf(AggFunc):
+3317    arg_types = {"parameters": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + ApproxQuantile(Quantile): + + + +
+ +
3320class ApproxQuantile(Quantile):
+3321    arg_types = {"this": True, "quantile": True, "accuracy": False, "weight": False}
+
+ + + + + +
+
+ +
+ + class + ReadCSV(Func): + + + +
+ +
3324class ReadCSV(Func):
+3325    _sql_names = ["READ_CSV"]
+3326    is_var_len_args = True
+3327    arg_types = {"this": True, "expressions": False}
+
+ + + + + +
+
+ +
+ + class + Reduce(Func): + + + +
+ +
3330class Reduce(Func):
+3331    arg_types = {"this": True, "initial": True, "merge": True, "finish": False}
+
+ + + + + +
+
+ +
+ + class + RegexpLike(Func): + + + +
+ +
3334class RegexpLike(Func):
+3335    arg_types = {"this": True, "expression": True, "flag": False}
+
+ + + + + +
+
+ +
+ + class + RegexpILike(Func): + + + +
+ +
3338class RegexpILike(Func):
+3339    arg_types = {"this": True, "expression": True, "flag": False}
+
+ + + + + +
+
+ +
+ + class + RegexpSplit(Func): + + + +
+ +
3342class RegexpSplit(Func):
+3343    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + Repeat(Func): + + + +
+ +
3346class Repeat(Func):
+3347    arg_types = {"this": True, "times": True}
+
+ + + + + +
+
+ +
+ + class + Round(Func): + + + +
+ +
3350class Round(Func):
+3351    arg_types = {"this": True, "decimals": False}
+
+ + + + + +
+
+ +
+ + class + RowNumber(Func): + + + +
+ +
3354class RowNumber(Func):
+3355    arg_types: t.Dict[str, t.Any] = {}
+
+ + + + + +
+
+ +
+ + class + SafeDivide(Func): + + + +
+ +
3358class SafeDivide(Func):
+3359    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + SetAgg(AggFunc): + + + +
+ +
3362class SetAgg(AggFunc):
+3363    pass
+
+ + + + + +
+
+ +
+ + class + SortArray(Func): + + + +
+ +
3366class SortArray(Func):
+3367    arg_types = {"this": True, "asc": False}
+
+ + + + + +
+
+ +
+ + class + Split(Func): + + + +
+ +
3370class Split(Func):
+3371    arg_types = {"this": True, "expression": True, "limit": False}
+
+ + + + + +
+
+ +
+ + class + Substring(Func): + + + +
+ +
3376class Substring(Func):
+3377    arg_types = {"this": True, "start": False, "length": False}
+
+ + + + + +
+
+ +
+ + class + StrPosition(Func): + + + +
+ +
3380class StrPosition(Func):
+3381    arg_types = {
+3382        "this": True,
+3383        "substr": True,
+3384        "position": False,
+3385        "instance": False,
+3386    }
+
+ + + + + +
+
+ +
+ + class + StrToDate(Func): + + + +
+ +
3389class StrToDate(Func):
+3390    arg_types = {"this": True, "format": True}
+
+ + + + + +
+
+ +
+ + class + StrToTime(Func): + + + +
+ +
3393class StrToTime(Func):
+3394    arg_types = {"this": True, "format": True}
+
+ + + + + +
+
+ +
+ + class + StrToUnix(Func): + + + +
+ +
3399class StrToUnix(Func):
+3400    arg_types = {"this": False, "format": False}
+
+ + + + + +
+
+ +
+ + class + NumberToStr(Func): + + + +
+ +
3403class NumberToStr(Func):
+3404    arg_types = {"this": True, "format": True}
+
+ + + + + +
+
+ +
+ + class + Struct(Func): + + + +
+ +
3407class Struct(Func):
+3408    arg_types = {"expressions": True}
+3409    is_var_len_args = True
+
+ + + + + +
+
+ +
+ + class + StructExtract(Func): + + + +
+ +
3412class StructExtract(Func):
+3413    arg_types = {"this": True, "expression": True}
+
+ + + + + +
+
+ +
+ + class + Sum(AggFunc): + + + +
+ +
3416class Sum(AggFunc):
+3417    pass
+
+ + + + + +
+
+ +
+ + class + Sqrt(Func): + + + +
+ +
3420class Sqrt(Func):
+3421    pass
+
+ + + + + +
+
+ +
+ + class + Stddev(AggFunc): + + + +
+ +
3424class Stddev(AggFunc):
+3425    pass
+
+ + + + + +
+
+ +
+ + class + StddevPop(AggFunc): + + + +
+ +
3428class StddevPop(AggFunc):
+3429    pass
+
+ + + + + +
+
+ +
+ + class + StddevSamp(AggFunc): + + + +
+ +
3432class StddevSamp(AggFunc):
+3433    pass
+
+ + + + + +
+
+ +
+ + class + TimeToStr(Func): + + + +
+ +
3436class TimeToStr(Func):
+3437    arg_types = {"this": True, "format": True}
+
+ + + + + +
+
+ +
+ + class + TimeToTimeStr(Func): + + + +
+ +
3440class TimeToTimeStr(Func):
+3441    pass
+
+ + + + + +
+
+ +
+ + class + TimeToUnix(Func): + + + +
+ +
3444class TimeToUnix(Func):
+3445    pass
+
+ + + + + +
+
+ +
+ + class + TimeStrToDate(Func): + + + +
+ +
3448class TimeStrToDate(Func):
+3449    pass
+
+ + + + + +
+
+ +
+ + class + TimeStrToTime(Func): + + + +
+ +
3452class TimeStrToTime(Func):
+3453    pass
+
+ + + + + +
+
+ +
+ + class + TimeStrToUnix(Func): + + + +
+ +
3456class TimeStrToUnix(Func):
+3457    pass
+
+ + + + + +
+
+ +
+ + class + Trim(Func): + + + +
+ +
3460class Trim(Func):
+3461    arg_types = {
+3462        "this": True,
+3463        "expression": False,
+3464        "position": False,
+3465        "collation": False,
+3466    }
+
+ + + + + +
+
+ +
+ + class + TsOrDsAdd(Func, TimeUnit): + + + +
+ +
3469class TsOrDsAdd(Func, TimeUnit):
+3470    arg_types = {"this": True, "expression": True, "unit": False}
+
+ + + + + +
+
+ +
+ + class + TsOrDsToDateStr(Func): + + + +
+ +
3473class TsOrDsToDateStr(Func):
+3474    pass
+
+ + + + + +
+
+ +
+ + class + TsOrDsToDate(Func): + + + +
+ +
3477class TsOrDsToDate(Func):
+3478    arg_types = {"this": True, "format": False}
+
+ + + + + +
+
+ +
+ + class + TsOrDiToDi(Func): + + + +
+ +
3481class TsOrDiToDi(Func):
+3482    pass
+
+ + + + + +
+
+ +
+ + class + Unhex(Func): + + + +
+ +
3485class Unhex(Func):
+3486    pass
+
+ + + + + +
+
+ +
+ + class + UnixToStr(Func): + + + +
+ +
3489class UnixToStr(Func):
+3490    arg_types = {"this": True, "format": False}
+
+ + + + + +
+
+ +
+ + class + UnixToTime(Func): + + + +
+ +
3495class UnixToTime(Func):
+3496    arg_types = {"this": True, "scale": False, "zone": False, "hours": False, "minutes": False}
+3497
+3498    SECONDS = Literal.string("seconds")
+3499    MILLIS = Literal.string("millis")
+3500    MICROS = Literal.string("micros")
+
+ + + + + +
+
+ +
+ + class + UnixToTimeStr(Func): + + + +
+ +
3503class UnixToTimeStr(Func):
+3504    pass
+
+ + + + + +
+
+ +
+ + class + Upper(Func): + + + +
+ +
3507class Upper(Func):
+3508    _sql_names = ["UPPER", "UCASE"]
+
+ + + + + +
+
+ +
+ + class + Variance(AggFunc): + + + +
+ +
3511class Variance(AggFunc):
+3512    _sql_names = ["VARIANCE", "VARIANCE_SAMP", "VAR_SAMP"]
+
+ + + + + +
+
+ +
+ + class + VariancePop(AggFunc): + + + +
+ +
3515class VariancePop(AggFunc):
+3516    _sql_names = ["VARIANCE_POP", "VAR_POP"]
+
+ + + + + +
+
+ +
+ + class + Week(Func): + + + +
+ +
3519class Week(Func):
+3520    arg_types = {"this": True, "mode": False}
+
+ + + + + +
+
+ +
+ + class + Year(Func): + + + +
+ +
3523class Year(Func):
+3524    pass
+
+ + + + + +
+
+ +
+ + class + Use(Expression): + + + +
+ +
3527class Use(Expression):
+3528    arg_types = {"this": True, "kind": False}
+
+ + + + + +
+
+ +
+ + class + Merge(Expression): + + + +
+ +
3531class Merge(Expression):
+3532    arg_types = {"this": True, "using": True, "on": True, "expressions": True}
+
+ + + + + +
+
+ +
+ + class + When(Func): + + + +
+ +
3535class When(Func):
+3536    arg_types = {"this": True, "then": True}
+
+ + + + + +
+
+ +
+ + def + maybe_parse( sql_or_expression: str | sqlglot.expressions.Expression, *, into: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]], NoneType] = None, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, prefix: Optional[str] = None, **opts) -> sqlglot.expressions.Expression: + + + +
+ +
3564def maybe_parse(
+3565    sql_or_expression: str | Expression,
+3566    *,
+3567    into: t.Optional[IntoType] = None,
+3568    dialect: DialectType = None,
+3569    prefix: t.Optional[str] = None,
+3570    **opts,
+3571) -> Expression:
+3572    """Gracefully handle a possible string or expression.
+3573
+3574    Example:
+3575        >>> maybe_parse("1")
+3576        (LITERAL this: 1, is_string: False)
+3577        >>> maybe_parse(to_identifier("x"))
+3578        (IDENTIFIER this: x, quoted: False)
+3579
+3580    Args:
+3581        sql_or_expression: the SQL code string or an expression
+3582        into: the SQLGlot Expression to parse into
+3583        dialect: the dialect used to parse the input expressions (in the case that an
+3584            input expression is a SQL string).
+3585        prefix: a string to prefix the sql with before it gets parsed
+3586            (automatically includes a space)
+3587        **opts: other options to use to parse the input expressions (again, in the case
+3588            that an input expression is a SQL string).
+3589
+3590    Returns:
+3591        Expression: the parsed or given expression.
+3592    """
+3593    if isinstance(sql_or_expression, Expression):
+3594        return sql_or_expression
+3595
+3596    import sqlglot
+3597
+3598    sql = str(sql_or_expression)
+3599    if prefix:
+3600        sql = f"{prefix} {sql}"
+3601    return sqlglot.parse_one(sql, read=dialect, into=into, **opts)
+
+ + +

Gracefully handle a possible string or expression.

+ +
Example:
+ +
+
+
>>> maybe_parse("1")
+(LITERAL this: 1, is_string: False)
+>>> maybe_parse(to_identifier("x"))
+(IDENTIFIER this: x, quoted: False)
+
+
+
+ +
Arguments:
+ +
    +
  • sql_or_expression: the SQL code string or an expression
  • +
  • into: the SQLGlot Expression to parse into
  • +
  • dialect: the dialect used to parse the input expressions (in the case that an +input expression is a SQL string).
  • +
  • prefix: a string to prefix the sql with before it gets parsed +(automatically includes a space)
  • +
  • **opts: other options to use to parse the input expressions (again, in the case +that an input expression is a SQL string).
  • +
+ +
Returns:
+ +
+

Expression: the parsed or given expression.

+
+
+ + +
+
+ +
+ + def + union(left, right, distinct=True, dialect=None, **opts): + + + +
+ +
3747def union(left, right, distinct=True, dialect=None, **opts):
+3748    """
+3749    Initializes a syntax tree from one UNION expression.
+3750
+3751    Example:
+3752        >>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()
+3753        'SELECT * FROM foo UNION SELECT * FROM bla'
+3754
+3755    Args:
+3756        left (str | Expression): the SQL code string corresponding to the left-hand side.
+3757            If an `Expression` instance is passed, it will be used as-is.
+3758        right (str | Expression): the SQL code string corresponding to the right-hand side.
+3759            If an `Expression` instance is passed, it will be used as-is.
+3760        distinct (bool): set the DISTINCT flag if and only if this is true.
+3761        dialect (str): the dialect used to parse the input expression.
+3762        opts (kwargs): other options to use to parse the input expressions.
+3763    Returns:
+3764        Union: the syntax tree for the UNION expression.
+3765    """
+3766    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
+3767    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
+3768
+3769    return Union(this=left, expression=right, distinct=distinct)
+
+ + +

Initializes a syntax tree from one UNION expression.

+ +
Example:
+ +
+
+
>>> union("SELECT * FROM foo", "SELECT * FROM bla").sql()
+'SELECT * FROM foo UNION SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • left (str | Expression): the SQL code string corresponding to the left-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • right (str | Expression): the SQL code string corresponding to the right-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Union: the syntax tree for the UNION expression.

+
+
+ + +
+
+ +
+ + def + intersect(left, right, distinct=True, dialect=None, **opts): + + + +
+ +
3772def intersect(left, right, distinct=True, dialect=None, **opts):
+3773    """
+3774    Initializes a syntax tree from one INTERSECT expression.
+3775
+3776    Example:
+3777        >>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
+3778        'SELECT * FROM foo INTERSECT SELECT * FROM bla'
+3779
+3780    Args:
+3781        left (str | Expression): the SQL code string corresponding to the left-hand side.
+3782            If an `Expression` instance is passed, it will be used as-is.
+3783        right (str | Expression): the SQL code string corresponding to the right-hand side.
+3784            If an `Expression` instance is passed, it will be used as-is.
+3785        distinct (bool): set the DISTINCT flag if and only if this is true.
+3786        dialect (str): the dialect used to parse the input expression.
+3787        opts (kwargs): other options to use to parse the input expressions.
+3788    Returns:
+3789        Intersect: the syntax tree for the INTERSECT expression.
+3790    """
+3791    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
+3792    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
+3793
+3794    return Intersect(this=left, expression=right, distinct=distinct)
+
+ + +

Initializes a syntax tree from one INTERSECT expression.

+ +
Example:
+ +
+
+
>>> intersect("SELECT * FROM foo", "SELECT * FROM bla").sql()
+'SELECT * FROM foo INTERSECT SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • left (str | Expression): the SQL code string corresponding to the left-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • right (str | Expression): the SQL code string corresponding to the right-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Intersect: the syntax tree for the INTERSECT expression.

+
+
+ + +
+
+ +
+ + def + except_(left, right, distinct=True, dialect=None, **opts): + + + +
+ +
3797def except_(left, right, distinct=True, dialect=None, **opts):
+3798    """
+3799    Initializes a syntax tree from one EXCEPT expression.
+3800
+3801    Example:
+3802        >>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
+3803        'SELECT * FROM foo EXCEPT SELECT * FROM bla'
+3804
+3805    Args:
+3806        left (str | Expression): the SQL code string corresponding to the left-hand side.
+3807            If an `Expression` instance is passed, it will be used as-is.
+3808        right (str | Expression): the SQL code string corresponding to the right-hand side.
+3809            If an `Expression` instance is passed, it will be used as-is.
+3810        distinct (bool): set the DISTINCT flag if and only if this is true.
+3811        dialect (str): the dialect used to parse the input expression.
+3812        opts (kwargs): other options to use to parse the input expressions.
+3813    Returns:
+3814        Except: the syntax tree for the EXCEPT statement.
+3815    """
+3816    left = maybe_parse(sql_or_expression=left, dialect=dialect, **opts)
+3817    right = maybe_parse(sql_or_expression=right, dialect=dialect, **opts)
+3818
+3819    return Except(this=left, expression=right, distinct=distinct)
+
+ + +

Initializes a syntax tree from one EXCEPT expression.

+ +
Example:
+ +
+
+
>>> except_("SELECT * FROM foo", "SELECT * FROM bla").sql()
+'SELECT * FROM foo EXCEPT SELECT * FROM bla'
+
+
+
+ +
Arguments:
+ +
    +
  • left (str | Expression): the SQL code string corresponding to the left-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • right (str | Expression): the SQL code string corresponding to the right-hand side. +If an Expression instance is passed, it will be used as-is.
  • +
  • distinct (bool): set the DISTINCT flag if and only if this is true.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • opts (kwargs): other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Except: the syntax tree for the EXCEPT statement.

+
+
+ + +
+
+ +
+ + def + select(*expressions, dialect=None, **opts) -> sqlglot.expressions.Select: + + + +
+ +
3822def select(*expressions, dialect=None, **opts) -> Select:
+3823    """
+3824    Initializes a syntax tree from one or multiple SELECT expressions.
+3825
+3826    Example:
+3827        >>> select("col1", "col2").from_("tbl").sql()
+3828        'SELECT col1, col2 FROM tbl'
+3829
+3830    Args:
+3831        *expressions (str | Expression): the SQL code string to parse as the expressions of a
+3832            SELECT statement. If an Expression instance is passed, this is used as-is.
+3833        dialect (str): the dialect used to parse the input expressions (in the case that an
+3834            input expression is a SQL string).
+3835        **opts: other options to use to parse the input expressions (again, in the case
+3836            that an input expression is a SQL string).
+3837
+3838    Returns:
+3839        Select: the syntax tree for the SELECT statement.
+3840    """
+3841    return Select().select(*expressions, dialect=dialect, **opts)
+
+ + +

Initializes a syntax tree from one or multiple SELECT expressions.

+ +
Example:
+ +
+
+
>>> select("col1", "col2").from_("tbl").sql()
+'SELECT col1, col2 FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code string to parse as the expressions of a +SELECT statement. If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expressions (in the case that an +input expression is a SQL string).
  • +
  • **opts: other options to use to parse the input expressions (again, in the case +that an input expression is a SQL string).
  • +
+ +
Returns:
+ +
+

Select: the syntax tree for the SELECT statement.

+
+
+ + +
+
+ +
+ + def + from_(*expressions, dialect=None, **opts) -> sqlglot.expressions.Select: + + + +
+ +
3844def from_(*expressions, dialect=None, **opts) -> Select:
+3845    """
+3846    Initializes a syntax tree from a FROM expression.
+3847
+3848    Example:
+3849        >>> from_("tbl").select("col1", "col2").sql()
+3850        'SELECT col1, col2 FROM tbl'
+3851
+3852    Args:
+3853        *expressions (str | Expression): the SQL code string to parse as the FROM expressions of a
+3854            SELECT statement. If an Expression instance is passed, this is used as-is.
+3855        dialect (str): the dialect used to parse the input expression (in the case that the
+3856            input expression is a SQL string).
+3857        **opts: other options to use to parse the input expressions (again, in the case
+3858            that the input expression is a SQL string).
+3859
+3860    Returns:
+3861        Select: the syntax tree for the SELECT statement.
+3862    """
+3863    return Select().from_(*expressions, dialect=dialect, **opts)
+
+ + +

Initializes a syntax tree from a FROM expression.

+ +
Example:
+ +
+
+
>>> from_("tbl").select("col1", "col2").sql()
+'SELECT col1, col2 FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code string to parse as the FROM expressions of a +SELECT statement. If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression (in the case that the +input expression is a SQL string).
  • +
  • **opts: other options to use to parse the input expressions (again, in the case +that the input expression is a SQL string).
  • +
+ +
Returns:
+ +
+

Select: the syntax tree for the SELECT statement.

+
+
+ + +
+
+ +
+ + def + update( table, properties, where=None, from_=None, dialect=None, **opts) -> sqlglot.expressions.Update: + + + +
+ +
3866def update(table, properties, where=None, from_=None, dialect=None, **opts) -> Update:
+3867    """
+3868    Creates an update statement.
+3869
+3870    Example:
+3871        >>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()
+3872        "UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"
+3873
+3874    Args:
+3875        *properties (Dict[str, Any]): dictionary of properties to set which are
+3876            auto converted to sql objects eg None -> NULL
+3877        where (str): sql conditional parsed into a WHERE statement
+3878        from_ (str): sql statement parsed into a FROM statement
+3879        dialect (str): the dialect used to parse the input expressions.
+3880        **opts: other options to use to parse the input expressions.
+3881
+3882    Returns:
+3883        Update: the syntax tree for the UPDATE statement.
+3884    """
+3885    update = Update(this=maybe_parse(table, into=Table, dialect=dialect))
+3886    update.set(
+3887        "expressions",
+3888        [
+3889            EQ(this=maybe_parse(k, dialect=dialect, **opts), expression=convert(v))
+3890            for k, v in properties.items()
+3891        ],
+3892    )
+3893    if from_:
+3894        update.set(
+3895            "from",
+3896            maybe_parse(from_, into=From, dialect=dialect, prefix="FROM", **opts),
+3897        )
+3898    if isinstance(where, Condition):
+3899        where = Where(this=where)
+3900    if where:
+3901        update.set(
+3902            "where",
+3903            maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
+3904        )
+3905    return update
+
+ + +

Creates an update statement.

+ +
Example:
+ +
+
+
>>> update("my_table", {"x": 1, "y": "2", "z": None}, from_="baz", where="id > 1").sql()
+"UPDATE my_table SET x = 1, y = '2', z = NULL FROM baz WHERE id > 1"
+
+
+
+ +
Arguments:
+ +
    +
  • *properties (Dict[str, Any]): dictionary of properties to set which are +auto converted to sql objects eg None -> NULL
  • +
  • where (str): sql conditional parsed into a WHERE statement
  • +
  • from_ (str): sql statement parsed into a FROM statement
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Update: the syntax tree for the UPDATE statement.

+
+
+ + +
+
+ +
+ + def + delete(table, where=None, dialect=None, **opts) -> sqlglot.expressions.Delete: + + + +
+ +
3908def delete(table, where=None, dialect=None, **opts) -> Delete:
+3909    """
+3910    Builds a delete statement.
+3911
+3912    Example:
+3913        >>> delete("my_table", where="id > 1").sql()
+3914        'DELETE FROM my_table WHERE id > 1'
+3915
+3916    Args:
+3917        where (str|Condition): sql conditional parsed into a WHERE statement
+3918        dialect (str): the dialect used to parse the input expressions.
+3919        **opts: other options to use to parse the input expressions.
+3920
+3921    Returns:
+3922        Delete: the syntax tree for the DELETE statement.
+3923    """
+3924    return Delete(
+3925        this=maybe_parse(table, into=Table, dialect=dialect, **opts),
+3926        where=Where(this=where)
+3927        if isinstance(where, Condition)
+3928        else maybe_parse(where, into=Where, dialect=dialect, prefix="WHERE", **opts),
+3929    )
+
+ + +

Builds a delete statement.

+ +
Example:
+ +
+
+
>>> delete("my_table", where="id > 1").sql()
+'DELETE FROM my_table WHERE id > 1'
+
+
+
+ +
Arguments:
+ +
    +
  • where (str|Condition): sql conditional parsed into a WHERE statement
  • +
  • dialect (str): the dialect used to parse the input expressions.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Delete: the syntax tree for the DELETE statement.

+
+
+ + +
+
+ +
+ + def + condition(expression, dialect=None, **opts) -> sqlglot.expressions.Condition: + + + +
+ +
3932def condition(expression, dialect=None, **opts) -> Condition:
+3933    """
+3934    Initialize a logical condition expression.
+3935
+3936    Example:
+3937        >>> condition("x=1").sql()
+3938        'x = 1'
+3939
+3940        This is helpful for composing larger logical syntax trees:
+3941        >>> where = condition("x=1")
+3942        >>> where = where.and_("y=1")
+3943        >>> Select().from_("tbl").select("*").where(where).sql()
+3944        'SELECT * FROM tbl WHERE x = 1 AND y = 1'
+3945
+3946    Args:
+3947        *expression (str | Expression): the SQL code string to parse.
+3948            If an Expression instance is passed, this is used as-is.
+3949        dialect (str): the dialect used to parse the input expression (in the case that the
+3950            input expression is a SQL string).
+3951        **opts: other options to use to parse the input expressions (again, in the case
+3952            that the input expression is a SQL string).
+3953
+3954    Returns:
+3955        Condition: the expression
+3956    """
+3957    return maybe_parse(  # type: ignore
+3958        expression,
+3959        into=Condition,
+3960        dialect=dialect,
+3961        **opts,
+3962    )
+
+ + +

Initialize a logical condition expression.

+ +
Example:
+ +
+
+
>>> condition("x=1").sql()
+'x = 1'
+
+
+ +

This is helpful for composing larger logical syntax trees:

+ +
+
>>> where = condition("x=1")
+>>> where = where.and_("y=1")
+>>> Select().from_("tbl").select("*").where(where).sql()
+'SELECT * FROM tbl WHERE x = 1 AND y = 1'
+
+
+
+ +
Arguments:
+ +
    +
  • *expression (str | Expression): the SQL code string to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression (in the case that the +input expression is a SQL string).
  • +
  • **opts: other options to use to parse the input expressions (again, in the case +that the input expression is a SQL string).
  • +
+ +
Returns:
+ +
+

Condition: the expression

+
+
+ + +
+
+ +
+ + def + and_(*expressions, dialect=None, **opts) -> sqlglot.expressions.And: + + + +
+ +
3965def and_(*expressions, dialect=None, **opts) -> And:
+3966    """
+3967    Combine multiple conditions with an AND logical operator.
+3968
+3969    Example:
+3970        >>> and_("x=1", and_("y=1", "z=1")).sql()
+3971        'x = 1 AND (y = 1 AND z = 1)'
+3972
+3973    Args:
+3974        *expressions (str | Expression): the SQL code strings to parse.
+3975            If an Expression instance is passed, this is used as-is.
+3976        dialect (str): the dialect used to parse the input expression.
+3977        **opts: other options to use to parse the input expressions.
+3978
+3979    Returns:
+3980        And: the new condition
+3981    """
+3982    return _combine(expressions, And, dialect, **opts)
+
+ + +

Combine multiple conditions with an AND logical operator.

+ +
Example:
+ +
+
+
>>> and_("x=1", and_("y=1", "z=1")).sql()
+'x = 1 AND (y = 1 AND z = 1)'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

And: the new condition

+
+
+ + +
+
+ +
+ + def + or_(*expressions, dialect=None, **opts) -> sqlglot.expressions.Or: + + + +
+ +
3985def or_(*expressions, dialect=None, **opts) -> Or:
+3986    """
+3987    Combine multiple conditions with an OR logical operator.
+3988
+3989    Example:
+3990        >>> or_("x=1", or_("y=1", "z=1")).sql()
+3991        'x = 1 OR (y = 1 OR z = 1)'
+3992
+3993    Args:
+3994        *expressions (str | Expression): the SQL code strings to parse.
+3995            If an Expression instance is passed, this is used as-is.
+3996        dialect (str): the dialect used to parse the input expression.
+3997        **opts: other options to use to parse the input expressions.
+3998
+3999    Returns:
+4000        Or: the new condition
+4001    """
+4002    return _combine(expressions, Or, dialect, **opts)
+
+ + +

Combine multiple conditions with an OR logical operator.

+ +
Example:
+ +
+
+
>>> or_("x=1", or_("y=1", "z=1")).sql()
+'x = 1 OR (y = 1 OR z = 1)'
+
+
+
+ +
Arguments:
+ +
    +
  • *expressions (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Or: the new condition

+
+
+ + +
+
+ +
+ + def + not_(expression, dialect=None, **opts) -> sqlglot.expressions.Not: + + + +
+ +
4005def not_(expression, dialect=None, **opts) -> Not:
+4006    """
+4007    Wrap a condition with a NOT operator.
+4008
+4009    Example:
+4010        >>> not_("this_suit='black'").sql()
+4011        "NOT this_suit = 'black'"
+4012
+4013    Args:
+4014        expression (str | Expression): the SQL code strings to parse.
+4015            If an Expression instance is passed, this is used as-is.
+4016        dialect (str): the dialect used to parse the input expression.
+4017        **opts: other options to use to parse the input expressions.
+4018
+4019    Returns:
+4020        Not: the new condition
+4021    """
+4022    this = condition(
+4023        expression,
+4024        dialect=dialect,
+4025        **opts,
+4026    )
+4027    return Not(this=_wrap_operator(this))
+
+ + +

Wrap a condition with a NOT operator.

+ +
Example:
+ +
+
+
>>> not_("this_suit='black'").sql()
+"NOT this_suit = 'black'"
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Not: the new condition

+
+
+ + +
+
+ +
+ + def + paren(expression) -> sqlglot.expressions.Paren: + + + +
+ +
4030def paren(expression) -> Paren:
+4031    return Paren(this=expression)
+
+ + + + +
+
+ +
+ + def + to_identifier(name, quoted=None): + + + +
+ +
4047def to_identifier(name, quoted=None):
+4048    """Builds an identifier.
+4049
+4050    Args:
+4051        name: The name to turn into an identifier.
+4052        quoted: Whether or not force quote the identifier.
+4053
+4054    Returns:
+4055        The identifier ast node.
+4056    """
+4057
+4058    if name is None:
+4059        return None
+4060
+4061    if isinstance(name, Identifier):
+4062        identifier = name
+4063    elif isinstance(name, str):
+4064        identifier = Identifier(
+4065            this=name,
+4066            quoted=not re.match(SAFE_IDENTIFIER_RE, name) if quoted is None else quoted,
+4067        )
+4068    else:
+4069        raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}")
+4070    return identifier
+
+ + +

Builds an identifier.

+ +
Arguments:
+ +
    +
  • name: The name to turn into an identifier.
  • +
  • quoted: Whether or not force quote the identifier.
  • +
+ +
Returns:
+ +
+

The identifier ast node.

+
+
+ + +
+
+ +
+ + def + to_interval( interval: str | sqlglot.expressions.Literal) -> sqlglot.expressions.Interval: + + + +
+ +
4076def to_interval(interval: str | Literal) -> Interval:
+4077    """Builds an interval expression from a string like '1 day' or '5 months'."""
+4078    if isinstance(interval, Literal):
+4079        if not interval.is_string:
+4080            raise ValueError("Invalid interval string.")
+4081
+4082        interval = interval.this
+4083
+4084    interval_parts = INTERVAL_STRING_RE.match(interval)  # type: ignore
+4085
+4086    if not interval_parts:
+4087        raise ValueError("Invalid interval string.")
+4088
+4089    return Interval(
+4090        this=Literal.string(interval_parts.group(1)),
+4091        unit=Var(this=interval_parts.group(2)),
+4092    )
+
+ + +

Builds an interval expression from a string like '1 day' or '5 months'.

+
+ + +
+
+ +
+ + def + to_table( sql_path: Union[str, sqlglot.expressions.Table, NoneType], **kwargs) -> Optional[sqlglot.expressions.Table]: + + + +
+ +
4105def to_table(sql_path: t.Optional[str | Table], **kwargs) -> t.Optional[Table]:
+4106    """
+4107    Create a table expression from a `[catalog].[schema].[table]` sql path. Catalog and schema are optional.
+4108    If a table is passed in then that table is returned.
+4109
+4110    Args:
+4111        sql_path: a `[catalog].[schema].[table]` string.
+4112
+4113    Returns:
+4114        A table expression.
+4115    """
+4116    if sql_path is None or isinstance(sql_path, Table):
+4117        return sql_path
+4118    if not isinstance(sql_path, str):
+4119        raise ValueError(f"Invalid type provided for a table: {type(sql_path)}")
+4120
+4121    catalog, db, table_name = (to_identifier(x) for x in split_num_words(sql_path, ".", 3))
+4122    return Table(this=table_name, db=db, catalog=catalog, **kwargs)
+
+ + +

Create a table expression from a [catalog].[schema].[table] sql path. Catalog and schema are optional. +If a table is passed in then that table is returned.

+ +
Arguments:
+ +
    +
  • sql_path: a [catalog].[schema].[table] string.
  • +
+ +
Returns:
+ +
+

A table expression.

+
+
+ + +
+
+ +
+ + def + to_column( sql_path: str | sqlglot.expressions.Column, **kwargs) -> sqlglot.expressions.Column: + + + +
+ +
4125def to_column(sql_path: str | Column, **kwargs) -> Column:
+4126    """
+4127    Create a column from a `[table].[column]` sql path. Schema is optional.
+4128
+4129    If a column is passed in then that column is returned.
+4130
+4131    Args:
+4132        sql_path: `[table].[column]` string
+4133    Returns:
+4134        Table: A column expression
+4135    """
+4136    if sql_path is None or isinstance(sql_path, Column):
+4137        return sql_path
+4138    if not isinstance(sql_path, str):
+4139        raise ValueError(f"Invalid type provided for column: {type(sql_path)}")
+4140    table_name, column_name = (to_identifier(x) for x in split_num_words(sql_path, ".", 2))
+4141    return Column(this=column_name, table=table_name, **kwargs)
+
+ + +

Create a column from a [table].[column] sql path. Schema is optional.

+ +

If a column is passed in then that column is returned.

+ +
Arguments:
+ +
    +
  • sql_path: [table].[column] string
  • +
+ +
Returns:
+ +
+

Table: A column expression

+
+
+ + +
+
+ +
+ + def + alias_( expression: str | sqlglot.expressions.Expression, alias: str | sqlglot.expressions.Identifier, table: Union[bool, Sequence[str | sqlglot.expressions.Identifier]] = False, quoted: Optional[bool] = None, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, **opts): + + + +
+ +
4144def alias_(
+4145    expression: str | Expression,
+4146    alias: str | Identifier,
+4147    table: bool | t.Sequence[str | Identifier] = False,
+4148    quoted: t.Optional[bool] = None,
+4149    dialect: DialectType = None,
+4150    **opts,
+4151):
+4152    """Create an Alias expression.
+4153
+4154    Example:
+4155        >>> alias_('foo', 'bar').sql()
+4156        'foo AS bar'
+4157
+4158        >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()
+4159        '(SELECT 1, 2) AS bar(a, b)'
+4160
+4161    Args:
+4162        expression: the SQL code strings to parse.
+4163            If an Expression instance is passed, this is used as-is.
+4164        alias: the alias name to use. If the name has
+4165            special characters it is quoted.
+4166        table: Whether or not to create a table alias, can also be a list of columns.
+4167        quoted: whether or not to quote the alias
+4168        dialect: the dialect used to parse the input expression.
+4169        **opts: other options to use to parse the input expressions.
+4170
+4171    Returns:
+4172        Alias: the aliased expression
+4173    """
+4174    exp = maybe_parse(expression, dialect=dialect, **opts)
+4175    alias = to_identifier(alias, quoted=quoted)
+4176
+4177    if table:
+4178        table_alias = TableAlias(this=alias)
+4179        exp.set("alias", table_alias)
+4180
+4181        if not isinstance(table, bool):
+4182            for column in table:
+4183                table_alias.append("columns", to_identifier(column, quoted=quoted))
+4184
+4185        return exp
+4186
+4187    # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in
+4188    # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node
+4189    # for the complete Window expression.
+4190    #
+4191    # [1]: https://cloud.google.com/bigquery/docs/reference/standard-sql/window-function-calls
+4192
+4193    if "alias" in exp.arg_types and not isinstance(exp, Window):
+4194        exp = exp.copy()
+4195        exp.set("alias", alias)
+4196        return exp
+4197    return Alias(this=exp, alias=alias)
+
+ + +

Create an Alias expression.

+ +
Example:
+ +
+
+
>>> alias_('foo', 'bar').sql()
+'foo AS bar'
+
+
+ +
+
>>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql()
+'(SELECT 1, 2) AS bar(a, b)'
+
+
+
+ +
Arguments:
+ +
    +
  • expression: the SQL code strings to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • alias: the alias name to use. If the name has +special characters it is quoted.
  • +
  • table: Whether or not to create a table alias, can also be a list of columns.
  • +
  • quoted: whether or not to quote the alias
  • +
  • dialect: the dialect used to parse the input expression.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Alias: the aliased expression

+
+
+ + +
+
+ +
+ + def + subquery(expression, alias=None, dialect=None, **opts): + + + +
+ +
4200def subquery(expression, alias=None, dialect=None, **opts):
+4201    """
+4202    Build a subquery expression.
+4203
+4204    Example:
+4205        >>> subquery('select x from tbl', 'bar').select('x').sql()
+4206        'SELECT x FROM (SELECT x FROM tbl) AS bar'
+4207
+4208    Args:
+4209        expression (str | Expression): the SQL code strings to parse.
+4210            If an Expression instance is passed, this is used as-is.
+4211        alias (str | Expression): the alias name to use.
+4212        dialect (str): the dialect used to parse the input expression.
+4213        **opts: other options to use to parse the input expressions.
+4214
+4215    Returns:
+4216        Select: a new select with the subquery expression included
+4217    """
+4218
+4219    expression = maybe_parse(expression, dialect=dialect, **opts).subquery(alias)
+4220    return Select().from_(expression, dialect=dialect, **opts)
+
+ + +

Build a subquery expression.

+ +
Example:
+ +
+
+
>>> subquery('select x from tbl', 'bar').select('x').sql()
+'SELECT x FROM (SELECT x FROM tbl) AS bar'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (str | Expression): the SQL code strings to parse. +If an Expression instance is passed, this is used as-is.
  • +
  • alias (str | Expression): the alias name to use.
  • +
  • dialect (str): the dialect used to parse the input expression.
  • +
  • **opts: other options to use to parse the input expressions.
  • +
+ +
Returns:
+ +
+

Select: a new select with the subquery expression included

+
+
+ + +
+
+ +
+ + def + column(col, table=None, quoted=None) -> sqlglot.expressions.Column: + + + +
+ +
4223def column(col, table=None, quoted=None) -> Column:
+4224    """
+4225    Build a Column.
+4226
+4227    Args:
+4228        col (str | Expression): column name
+4229        table (str | Expression): table name
+4230    Returns:
+4231        Column: column instance
+4232    """
+4233    return Column(
+4234        this=to_identifier(col, quoted=quoted),
+4235        table=to_identifier(table, quoted=quoted),
+4236    )
+
+ + +

Build a Column.

+ +
Arguments:
+ +
    +
  • col (str | Expression): column name
  • +
  • table (str | Expression): table name
  • +
+ +
Returns:
+ +
+

Column: column instance

+
+
+ + +
+
+ +
+ + def + cast( expression: str | sqlglot.expressions.Expression, to: str | sqlglot.expressions.DataType | sqlglot.expressions.DataType.Type, **opts) -> sqlglot.expressions.Cast: + + + +
+ +
4239def cast(expression: str | Expression, to: str | DataType | DataType.Type, **opts) -> Cast:
+4240    """Cast an expression to a data type.
+4241
+4242    Example:
+4243        >>> cast('x + 1', 'int').sql()
+4244        'CAST(x + 1 AS INT)'
+4245
+4246    Args:
+4247        expression: The expression to cast.
+4248        to: The datatype to cast to.
+4249
+4250    Returns:
+4251        A cast node.
+4252    """
+4253    expression = maybe_parse(expression, **opts)
+4254    return Cast(this=expression, to=DataType.build(to, **opts))
+
+ + +

Cast an expression to a data type.

+ +
Example:
+ +
+
+
>>> cast('x + 1', 'int').sql()
+'CAST(x + 1 AS INT)'
+
+
+
+ +
Arguments:
+ +
    +
  • expression: The expression to cast.
  • +
  • to: The datatype to cast to.
  • +
+ +
Returns:
+ +
+

A cast node.

+
+
+ + +
+
+ +
+ + def + table_( table, db=None, catalog=None, quoted=None, alias=None) -> sqlglot.expressions.Table: + + + +
+ +
4257def table_(table, db=None, catalog=None, quoted=None, alias=None) -> Table:
+4258    """Build a Table.
+4259
+4260    Args:
+4261        table (str | Expression): column name
+4262        db (str | Expression): db name
+4263        catalog (str | Expression): catalog name
+4264
+4265    Returns:
+4266        Table: table instance
+4267    """
+4268    return Table(
+4269        this=to_identifier(table, quoted=quoted),
+4270        db=to_identifier(db, quoted=quoted),
+4271        catalog=to_identifier(catalog, quoted=quoted),
+4272        alias=TableAlias(this=to_identifier(alias)) if alias else None,
+4273    )
+
+ + +

Build a Table.

+ +
Arguments:
+ +
    +
  • table (str | Expression): column name
  • +
  • db (str | Expression): db name
  • +
  • catalog (str | Expression): catalog name
  • +
+ +
Returns:
+ +
+

Table: table instance

+
+
+ + +
+
+ +
+ + def + values( values: Iterable[Tuple[Any, ...]], alias: Optional[str] = None, columns: Union[Iterable[str], Dict[str, sqlglot.expressions.DataType], NoneType] = None) -> sqlglot.expressions.Values: + + + +
+ +
4276def values(
+4277    values: t.Iterable[t.Tuple[t.Any, ...]],
+4278    alias: t.Optional[str] = None,
+4279    columns: t.Optional[t.Iterable[str] | t.Dict[str, DataType]] = None,
+4280) -> Values:
+4281    """Build VALUES statement.
+4282
+4283    Example:
+4284        >>> values([(1, '2')]).sql()
+4285        "VALUES (1, '2')"
+4286
+4287    Args:
+4288        values: values statements that will be converted to SQL
+4289        alias: optional alias
+4290        columns: Optional list of ordered column names or ordered dictionary of column names to types.
+4291         If either are provided then an alias is also required.
+4292         If a dictionary is provided then the first column of the values will be casted to the expected type
+4293         in order to help with type inference.
+4294
+4295    Returns:
+4296        Values: the Values expression object
+4297    """
+4298    if columns and not alias:
+4299        raise ValueError("Alias is required when providing columns")
+4300    table_alias = (
+4301        TableAlias(this=to_identifier(alias), columns=[to_identifier(x) for x in columns])
+4302        if columns
+4303        else TableAlias(this=to_identifier(alias) if alias else None)
+4304    )
+4305    expressions = [convert(tup) for tup in values]
+4306    if columns and isinstance(columns, dict):
+4307        types = list(columns.values())
+4308        expressions[0].set(
+4309            "expressions",
+4310            [cast(x, types[i]) for i, x in enumerate(expressions[0].expressions)],
+4311        )
+4312    return Values(
+4313        expressions=expressions,
+4314        alias=table_alias,
+4315    )
+
+ + +

Build VALUES statement.

+ +
Example:
+ +
+
+
>>> values([(1, '2')]).sql()
+"VALUES (1, '2')"
+
+
+
+ +
Arguments:
+ +
    +
  • values: values statements that will be converted to SQL
  • +
  • alias: optional alias
  • +
  • columns: Optional list of ordered column names or ordered dictionary of column names to types. +If either are provided then an alias is also required. +If a dictionary is provided then the first column of the values will be casted to the expected type +in order to help with type inference.
  • +
+ +
Returns:
+ +
+

Values: the Values expression object

+
+
+ + +
+
+ +
+ + def + rename_table( old_name: str | sqlglot.expressions.Table, new_name: str | sqlglot.expressions.Table) -> sqlglot.expressions.AlterTable: + + + +
+ +
4318def rename_table(old_name: str | Table, new_name: str | Table) -> AlterTable:
+4319    """Build ALTER TABLE... RENAME... expression
+4320
+4321    Args:
+4322        old_name: The old name of the table
+4323        new_name: The new name of the table
+4324
+4325    Returns:
+4326        Alter table expression
+4327    """
+4328    old_table = to_table(old_name)
+4329    new_table = to_table(new_name)
+4330    return AlterTable(
+4331        this=old_table,
+4332        actions=[
+4333            RenameTable(this=new_table),
+4334        ],
+4335    )
+
+ + +

Build ALTER TABLE... RENAME... expression

+ +
Arguments:
+ +
    +
  • old_name: The old name of the table
  • +
  • new_name: The new name of the table
  • +
+ +
Returns:
+ +
+

Alter table expression

+
+
+ + +
+
+ +
+ + def + convert(value) -> sqlglot.expressions.Expression: + + + +
+ +
4338def convert(value) -> Expression:
+4339    """Convert a python value into an expression object.
+4340
+4341    Raises an error if a conversion is not possible.
+4342
+4343    Args:
+4344        value (Any): a python object
+4345
+4346    Returns:
+4347        Expression: the equivalent expression object
+4348    """
+4349    if isinstance(value, Expression):
+4350        return value
+4351    if value is None:
+4352        return NULL
+4353    if isinstance(value, bool):
+4354        return Boolean(this=value)
+4355    if isinstance(value, str):
+4356        return Literal.string(value)
+4357    if isinstance(value, float) and math.isnan(value):
+4358        return NULL
+4359    if isinstance(value, numbers.Number):
+4360        return Literal.number(value)
+4361    if isinstance(value, tuple):
+4362        return Tuple(expressions=[convert(v) for v in value])
+4363    if isinstance(value, list):
+4364        return Array(expressions=[convert(v) for v in value])
+4365    if isinstance(value, dict):
+4366        return Map(
+4367            keys=[convert(k) for k in value],
+4368            values=[convert(v) for v in value.values()],
+4369        )
+4370    if isinstance(value, datetime.datetime):
+4371        datetime_literal = Literal.string(
+4372            (value if value.tzinfo else value.replace(tzinfo=datetime.timezone.utc)).isoformat()
+4373        )
+4374        return TimeStrToTime(this=datetime_literal)
+4375    if isinstance(value, datetime.date):
+4376        date_literal = Literal.string(value.strftime("%Y-%m-%d"))
+4377        return DateStrToDate(this=date_literal)
+4378    raise ValueError(f"Cannot convert {value}")
+
+ + +

Convert a python value into an expression object.

+ +

Raises an error if a conversion is not possible.

+ +
Arguments:
+ +
    +
  • value (Any): a python object
  • +
+ +
Returns:
+ +
+

Expression: the equivalent expression object

+
+
+ + +
+
+ +
+ + def + replace_children(expression, fun): + + + +
+ +
4381def replace_children(expression, fun):
+4382    """
+4383    Replace children of an expression with the result of a lambda fun(child) -> exp.
+4384    """
+4385    for k, v in expression.args.items():
+4386        is_list_arg = isinstance(v, list)
+4387
+4388        child_nodes = v if is_list_arg else [v]
+4389        new_child_nodes = []
+4390
+4391        for cn in child_nodes:
+4392            if isinstance(cn, Expression):
+4393                for child_node in ensure_collection(fun(cn)):
+4394                    new_child_nodes.append(child_node)
+4395                    child_node.parent = expression
+4396                    child_node.arg_key = k
+4397            else:
+4398                new_child_nodes.append(cn)
+4399
+4400        expression.args[k] = new_child_nodes if is_list_arg else seq_get(new_child_nodes, 0)
+
+ + +

Replace children of an expression with the result of a lambda fun(child) -> exp.

+
+ + +
+
+ +
+ + def + column_table_names(expression): + + + +
+ +
4403def column_table_names(expression):
+4404    """
+4405    Return all table names referenced through columns in an expression.
+4406
+4407    Example:
+4408        >>> import sqlglot
+4409        >>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))
+4410        ['c', 'a']
+4411
+4412    Args:
+4413        expression (sqlglot.Expression): expression to find table names
+4414
+4415    Returns:
+4416        list: A list of unique names
+4417    """
+4418    return list(dict.fromkeys(column.table for column in expression.find_all(Column)))
+
+ + +

Return all table names referenced through columns in an expression.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> column_table_names(sqlglot.parse_one("a.b AND c.d AND c.e"))
+['c', 'a']
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to find table names
  • +
+ +
Returns:
+ +
+

list: A list of unique names

+
+
+ + +
+
+ +
+ + def + table_name(table) -> str: + + + +
+ +
4421def table_name(table) -> str:
+4422    """Get the full name of a table as a string.
+4423
+4424    Args:
+4425        table (exp.Table | str): table expression node or string.
+4426
+4427    Examples:
+4428        >>> from sqlglot import exp, parse_one
+4429        >>> table_name(parse_one("select * from a.b.c").find(exp.Table))
+4430        'a.b.c'
+4431
+4432    Returns:
+4433        The table name.
+4434    """
+4435
+4436    table = maybe_parse(table, into=Table)
+4437
+4438    if not table:
+4439        raise ValueError(f"Cannot parse {table}")
+4440
+4441    return ".".join(
+4442        part
+4443        for part in (
+4444            table.text("catalog"),
+4445            table.text("db"),
+4446            table.name,
+4447        )
+4448        if part
+4449    )
+
+ + +

Get the full name of a table as a string.

+ +
Arguments:
+ +
    +
  • table (exp.Table | str): table expression node or string.
  • +
+ +
Examples:
+ +
+
+
>>> from sqlglot import exp, parse_one
+>>> table_name(parse_one("select * from a.b.c").find(exp.Table))
+'a.b.c'
+
+
+
+ +
Returns:
+ +
+

The table name.

+
+
+ + +
+
+ +
+ + def + replace_tables(expression, mapping): + + + +
+ +
4452def replace_tables(expression, mapping):
+4453    """Replace all tables in expression according to the mapping.
+4454
+4455    Args:
+4456        expression (sqlglot.Expression): expression node to be transformed and replaced.
+4457        mapping (Dict[str, str]): mapping of table names.
+4458
+4459    Examples:
+4460        >>> from sqlglot import exp, parse_one
+4461        >>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
+4462        'SELECT * FROM c'
+4463
+4464    Returns:
+4465        The mapped expression.
+4466    """
+4467
+4468    def _replace_tables(node):
+4469        if isinstance(node, Table):
+4470            new_name = mapping.get(table_name(node))
+4471            if new_name:
+4472                return to_table(
+4473                    new_name,
+4474                    **{k: v for k, v in node.args.items() if k not in ("this", "db", "catalog")},
+4475                )
+4476        return node
+4477
+4478    return expression.transform(_replace_tables)
+
+ + +

Replace all tables in expression according to the mapping.

+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression node to be transformed and replaced.
  • +
  • mapping (Dict[str, str]): mapping of table names.
  • +
+ +
Examples:
+ +
+
+
>>> from sqlglot import exp, parse_one
+>>> replace_tables(parse_one("select * from a.b"), {"a.b": "c"}).sql()
+'SELECT * FROM c'
+
+
+
+ +
Returns:
+ +
+

The mapped expression.

+
+
+ + +
+
+ +
+ + def + replace_placeholders(expression, *args, **kwargs): + + + +
+ +
4481def replace_placeholders(expression, *args, **kwargs):
+4482    """Replace placeholders in an expression.
+4483
+4484    Args:
+4485        expression (sqlglot.Expression): expression node to be transformed and replaced.
+4486        args: positional names that will substitute unnamed placeholders in the given order.
+4487        kwargs: keyword arguments that will substitute named placeholders.
+4488
+4489    Examples:
+4490        >>> from sqlglot import exp, parse_one
+4491        >>> replace_placeholders(
+4492        ...     parse_one("select * from :tbl where ? = ?"), "a", "b", tbl="foo"
+4493        ... ).sql()
+4494        'SELECT * FROM foo WHERE a = b'
+4495
+4496    Returns:
+4497        The mapped expression.
+4498    """
+4499
+4500    def _replace_placeholders(node, args, **kwargs):
+4501        if isinstance(node, Placeholder):
+4502            if node.name:
+4503                new_name = kwargs.get(node.name)
+4504                if new_name:
+4505                    return to_identifier(new_name)
+4506            else:
+4507                try:
+4508                    return to_identifier(next(args))
+4509                except StopIteration:
+4510                    pass
+4511        return node
+4512
+4513    return expression.transform(_replace_placeholders, iter(args), **kwargs)
+
+ + +

Replace placeholders in an expression.

+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression node to be transformed and replaced.
  • +
  • args: positional names that will substitute unnamed placeholders in the given order.
  • +
  • kwargs: keyword arguments that will substitute named placeholders.
  • +
+ +
Examples:
+ +
+
+
>>> from sqlglot import exp, parse_one
+>>> replace_placeholders(
+...     parse_one("select * from :tbl where ? = ?"), "a", "b", tbl="foo"
+... ).sql()
+'SELECT * FROM foo WHERE a = b'
+
+
+
+ +
Returns:
+ +
+

The mapped expression.

+
+
+ + +
+
+ +
+ + def + expand( expression: sqlglot.expressions.Expression, sources: Dict[str, sqlglot.expressions.Subqueryable], copy=True) -> sqlglot.expressions.Expression: + + + +
+ +
4516def expand(expression: Expression, sources: t.Dict[str, Subqueryable], copy=True) -> Expression:
+4517    """Transforms an expression by expanding all referenced sources into subqueries.
+4518
+4519    Examples:
+4520        >>> from sqlglot import parse_one
+4521        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
+4522        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
+4523
+4524    Args:
+4525        expression: The expression to expand.
+4526        sources: A dictionary of name to Subqueryables.
+4527        copy: Whether or not to copy the expression during transformation. Defaults to True.
+4528
+4529    Returns:
+4530        The transformed expression.
+4531    """
+4532
+4533    def _expand(node: Expression):
+4534        if isinstance(node, Table):
+4535            name = table_name(node)
+4536            source = sources.get(name)
+4537            if source:
+4538                subquery = source.subquery(node.alias or name)
+4539                subquery.comments = [f"source: {name}"]
+4540                return subquery
+4541        return node
+4542
+4543    return expression.transform(_expand, copy=copy)
+
+ + +

Transforms an expression by expanding all referenced sources into subqueries.

+ +
Examples:
+ +
+
+
>>> from sqlglot import parse_one
+>>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
+'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
+
+
+
+ +
Arguments:
+ +
    +
  • expression: The expression to expand.
  • +
  • sources: A dictionary of name to Subqueryables.
  • +
  • copy: Whether or not to copy the expression during transformation. Defaults to True.
  • +
+ +
Returns:
+ +
+

The transformed expression.

+
+
+ + +
+
+ +
+ + def + func( name: str, *args, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, **kwargs) -> sqlglot.expressions.Func: + + + +
+ +
4546def func(name: str, *args, dialect: DialectType = None, **kwargs) -> Func:
+4547    """
+4548    Returns a Func expression.
+4549
+4550    Examples:
+4551        >>> func("abs", 5).sql()
+4552        'ABS(5)'
+4553
+4554        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
+4555        'CAST(5 AS DOUBLE)'
+4556
+4557    Args:
+4558        name: the name of the function to build.
+4559        args: the args used to instantiate the function of interest.
+4560        dialect: the source dialect.
+4561        kwargs: the kwargs used to instantiate the function of interest.
+4562
+4563    Note:
+4564        The arguments `args` and `kwargs` are mutually exclusive.
+4565
+4566    Returns:
+4567        An instance of the function of interest, or an anonymous function, if `name` doesn't
+4568        correspond to an existing `sqlglot.expressions.Func` class.
+4569    """
+4570    if args and kwargs:
+4571        raise ValueError("Can't use both args and kwargs to instantiate a function.")
+4572
+4573    from sqlglot.dialects.dialect import Dialect
+4574
+4575    args = tuple(convert(arg) for arg in args)
+4576    kwargs = {key: convert(value) for key, value in kwargs.items()}
+4577
+4578    parser = Dialect.get_or_raise(dialect)().parser()
+4579    from_args_list = parser.FUNCTIONS.get(name.upper())
+4580
+4581    if from_args_list:
+4582        function = from_args_list(args) if args else from_args_list.__self__(**kwargs)  # type: ignore
+4583    else:
+4584        kwargs = kwargs or {"expressions": args}
+4585        function = Anonymous(this=name, **kwargs)
+4586
+4587    for error_message in function.error_messages(args):
+4588        raise ValueError(error_message)
+4589
+4590    return function
+
+ + +

Returns a Func expression.

+ +
Examples:
+ +
+
+
>>> func("abs", 5).sql()
+'ABS(5)'
+
+
+ +
+
>>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
+'CAST(5 AS DOUBLE)'
+
+
+
+ +
Arguments:
+ +
    +
  • name: the name of the function to build.
  • +
  • args: the args used to instantiate the function of interest.
  • +
  • dialect: the source dialect.
  • +
  • kwargs: the kwargs used to instantiate the function of interest.
  • +
+ +
Note:
+ +
+

The arguments args and kwargs are mutually exclusive.

+
+ +
Returns:
+ +
+

An instance of the function of interest, or an anonymous function, if name doesn't + correspond to an existing sqlglot.expressions.Func class.

+
+
+ + +
+
+ +
+ + def + true(): + + + +
+ +
4593def true():
+4594    """
+4595    Returns a true Boolean expression.
+4596    """
+4597    return Boolean(this=True)
+
+ + +

Returns a true Boolean expression.

+
+ + +
+
+ +
+ + def + false(): + + + +
+ +
4600def false():
+4601    """
+4602    Returns a false Boolean expression.
+4603    """
+4604    return Boolean(this=False)
+
+ + +

Returns a false Boolean expression.

+
+ + +
+
+ +
+ + def + null(): + + + +
+ +
4607def null():
+4608    """
+4609    Returns a Null expression.
+4610    """
+4611    return Null()
+
+ + +

Returns a Null expression.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/generator.html b/docs/sqlglot/generator.html new file mode 100644 index 0000000..76dbbc0 --- /dev/null +++ b/docs/sqlglot/generator.html @@ -0,0 +1,9855 @@ + + + + + + + sqlglot.generator API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.generator

+ + + + + + +
   1from __future__ import annotations
+   2
+   3import logging
+   4import re
+   5import typing as t
+   6
+   7from sqlglot import exp
+   8from sqlglot.errors import ErrorLevel, UnsupportedError, concat_messages
+   9from sqlglot.helper import apply_index_offset, csv
+  10from sqlglot.time import format_time
+  11from sqlglot.tokens import TokenType
+  12
+  13logger = logging.getLogger("sqlglot")
+  14
+  15BACKSLASH_RE = re.compile(r"\\(?!b|f|n|r|t|0)")
+  16
+  17
+  18class Generator:
+  19    """
+  20    Generator interprets the given syntax tree and produces a SQL string as an output.
+  21
+  22    Args:
+  23        time_mapping (dict): the dictionary of custom time mappings in which the key
+  24            represents a python time format and the output the target time format
+  25        time_trie (trie): a trie of the time_mapping keys
+  26        pretty (bool): if set to True the returned string will be formatted. Default: False.
+  27        quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
+  28        quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
+  29        identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
+  30        identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
+  31        identify (bool): if set to True all identifiers will be delimited by the corresponding
+  32            character.
+  33        normalize (bool): if set to True all identifiers will lower cased
+  34        string_escape (str): specifies a string escape character. Default: '.
+  35        identifier_escape (str): specifies an identifier escape character. Default: ".
+  36        pad (int): determines padding in a formatted string. Default: 2.
+  37        indent (int): determines the size of indentation in a formatted string. Default: 4.
+  38        unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
+  39        normalize_functions (str): normalize function names, "upper", "lower", or None
+  40            Default: "upper"
+  41        alias_post_tablesample (bool): if the table alias comes after tablesample
+  42            Default: False
+  43        unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
+  44            unsupported expressions. Default ErrorLevel.WARN.
+  45        null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
+  46            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
+  47            Default: "nulls_are_small"
+  48        max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+  49            This is only relevant if unsupported_level is ErrorLevel.RAISE.
+  50            Default: 3
+  51        leading_comma (bool): if the the comma is leading or trailing in select statements
+  52            Default: False
+  53        max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
+  54            The default is on the smaller end because the length only represents a segment and not the true
+  55            line length.
+  56            Default: 80
+  57        comments: Whether or not to preserve comments in the output SQL code.
+  58            Default: True
+  59    """
+  60
+  61    TRANSFORMS = {
+  62        exp.DateAdd: lambda self, e: f"DATE_ADD({self.format_args(e.this, e.expression, e.args.get('unit'))})",
+  63        exp.DateDiff: lambda self, e: f"DATEDIFF({self.format_args(e.this, e.expression)})",
+  64        exp.TsOrDsAdd: lambda self, e: f"TS_OR_DS_ADD({self.format_args(e.this, e.expression, e.args.get('unit'))})",
+  65        exp.VarMap: lambda self, e: f"MAP({self.format_args(e.args['keys'], e.args['values'])})",
+  66        exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args['default'] else ''}CHARACTER SET={self.sql(e, 'this')}",
+  67        exp.LanguageProperty: lambda self, e: self.naked_property(e),
+  68        exp.LocationProperty: lambda self, e: self.naked_property(e),
+  69        exp.ReturnsProperty: lambda self, e: self.naked_property(e),
+  70        exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
+  71        exp.VolatilityProperty: lambda self, e: e.name,
+  72        exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
+  73        exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
+  74        exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
+  75    }
+  76
+  77    # Whether 'CREATE ... TRANSIENT ... TABLE' is allowed
+  78    CREATE_TRANSIENT = False
+  79
+  80    # Whether or not null ordering is supported in order by
+  81    NULL_ORDERING_SUPPORTED = True
+  82
+  83    # Whether or not locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
+  84    LOCKING_READS_SUPPORTED = False
+  85
+  86    # Always do union distinct or union all
+  87    EXPLICIT_UNION = False
+  88
+  89    # Wrap derived values in parens, usually standard but spark doesn't support it
+  90    WRAP_DERIVED_VALUES = True
+  91
+  92    # Whether or not create function uses an AS before the def.
+  93    CREATE_FUNCTION_AS = True
+  94
+  95    TYPE_MAPPING = {
+  96        exp.DataType.Type.NCHAR: "CHAR",
+  97        exp.DataType.Type.NVARCHAR: "VARCHAR",
+  98        exp.DataType.Type.MEDIUMTEXT: "TEXT",
+  99        exp.DataType.Type.LONGTEXT: "TEXT",
+ 100        exp.DataType.Type.MEDIUMBLOB: "BLOB",
+ 101        exp.DataType.Type.LONGBLOB: "BLOB",
+ 102    }
+ 103
+ 104    STAR_MAPPING = {
+ 105        "except": "EXCEPT",
+ 106        "replace": "REPLACE",
+ 107    }
+ 108
    # Per-dialect overrides for how individual token types are rendered.
    TOKEN_MAPPING: t.Dict[TokenType, str] = {}

    # Open/close delimiters used when rendering nested types, e.g. STRUCT<a INT>.
    STRUCT_DELIMITER = ("<", ">")

    # Maps each property expression class to the position within a CREATE
    # statement where it must be rendered (consumed by locate_properties and
    # properties_sql).
    PROPERTIES_LOCATION = {
        exp.AfterJournalProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
        exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.BlockCompressionProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.ChecksumProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.CollateProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.DataBlocksizeProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
        exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.EngineProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.FallbackProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA_WITH,
        exp.FreespaceProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.IsolatedLoadingProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.JournalProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.LikeProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.LocationProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.LogProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.MergeBlockRatioProperty: exp.Properties.Location.PRE_SCHEMA,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_WITH,
        exp.Property: exp.Properties.Location.POST_SCHEMA_WITH,
        exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
        exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA_WITH,
        exp.VolatilityProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
        exp.WithJournalTableProperty: exp.Properties.Location.PRE_SCHEMA,
    }

    # Expression types whose comments are emitted on their own line *before*
    # the SQL instead of appended after it (see maybe_comment).
    WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.Binary)
    # Placeholder protecting intentional line breaks during pretty-printing;
    # replaced with "\n" at the end of generate().
    SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
+ 153
    # Fixed attribute set: saves per-instance memory and catches typos on
    # assignment. Must stay in sync with __init__.
    __slots__ = (
        "time_mapping",
        "time_trie",
        "pretty",
        "quote_start",
        "quote_end",
        "identifier_start",
        "identifier_end",
        "identify",
        "normalize",
        "string_escape",
        "identifier_escape",
        "pad",
        "index_offset",
        "unnest_column_only",
        "alias_post_tablesample",
        "normalize_functions",
        "unsupported_level",
        "unsupported_messages",
        "null_ordering",
        "max_unsupported",
        "_indent",
        "_replace_backslash",
        "_escaped_quote_end",
        "_escaped_identifier_end",
        "_leading_comma",
        "_max_text_width",
        "_comments",
    )
+ 183
    def __init__(
        self,
        time_mapping=None,
        time_trie=None,
        pretty=None,
        quote_start=None,
        quote_end=None,
        identifier_start=None,
        identifier_end=None,
        identify=False,
        normalize=False,
        string_escape=None,
        identifier_escape=None,
        pad=2,
        indent=2,
        index_offset=0,
        unnest_column_only=False,
        alias_post_tablesample=False,
        normalize_functions="upper",
        unsupported_level=ErrorLevel.WARN,
        null_ordering=None,
        max_unsupported=3,
        leading_comma=False,
        max_text_width=80,
        comments=True,
    ):
        """Initialize generator state.

        Most arguments configure quoting/escaping characters and formatting;
        `None` values fall back to the defaults assigned below. `pretty`
        defaults to the global `sqlglot.pretty` flag. `pad` is the extra
        padding inside indented blocks and `indent` the width of one
        indentation level (see indent()).
        """
        # Imported here, not at module level — presumably to avoid a circular
        # import with the sqlglot package (needed only for the pretty default).
        import sqlglot

        self.time_mapping = time_mapping or {}
        self.time_trie = time_trie
        self.pretty = pretty if pretty is not None else sqlglot.pretty
        self.quote_start = quote_start or "'"
        self.quote_end = quote_end or "'"
        self.identifier_start = identifier_start or '"'
        self.identifier_end = identifier_end or '"'
        self.identify = identify
        self.normalize = normalize
        self.string_escape = string_escape or "'"
        self.identifier_escape = identifier_escape or '"'
        self.pad = pad
        self.index_offset = index_offset
        self.unnest_column_only = unnest_column_only
        self.alias_post_tablesample = alias_post_tablesample
        self.normalize_functions = normalize_functions
        self.unsupported_level = unsupported_level
        self.unsupported_messages = []
        self.max_unsupported = max_unsupported
        self.null_ordering = null_ordering
        self._indent = indent
        # Derived values, precomputed from the escape/quote configuration.
        self._replace_backslash = self.string_escape == "\\"
        self._escaped_quote_end = self.string_escape + self.quote_end
        self._escaped_identifier_end = self.identifier_escape + self.identifier_end
        self._leading_comma = leading_comma
        self._max_text_width = max_text_width
        self._comments = comments
+ 239
    def generate(self, expression: t.Optional[exp.Expression]) -> str:
        """
        Generates a SQL string by interpreting the given syntax tree.

        Args:
            expression: the syntax tree.

        Returns:
            the SQL string.

        Raises:
            UnsupportedError: if unsupported_level is RAISE and unsupported
                constructs were encountered.
        """
        # Reset messages so repeated generate() calls don't accumulate them.
        self.unsupported_messages = []
        sql = self.sql(expression).strip()

        if self.unsupported_level == ErrorLevel.IGNORE:
            return sql

        if self.unsupported_level == ErrorLevel.WARN:
            for msg in self.unsupported_messages:
                logger.warning(msg)
        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))

        if self.pretty:
            # Restore line breaks that were protected by the sentinel during
            # pretty-printing.
            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
        return sql
+ 265
+ 266    def unsupported(self, message: str) -> None:
+ 267        if self.unsupported_level == ErrorLevel.IMMEDIATE:
+ 268            raise UnsupportedError(message)
+ 269        self.unsupported_messages.append(message)
+ 270
+ 271    def sep(self, sep: str = " ") -> str:
+ 272        return f"{sep.strip()}\n" if self.pretty else sep
+ 273
+ 274    def seg(self, sql: str, sep: str = " ") -> str:
+ 275        return f"{self.sep(sep)}{sql}"
+ 276
+ 277    def pad_comment(self, comment: str) -> str:
+ 278        comment = " " + comment if comment[0].strip() else comment
+ 279        comment = comment + " " if comment[-1].strip() else comment
+ 280        return comment
+ 281
+ 282    def maybe_comment(self, sql: str, expression: exp.Expression) -> str:
+ 283        comments = expression.comments if self._comments else None
+ 284
+ 285        if not comments:
+ 286            return sql
+ 287
+ 288        sep = "\n" if self.pretty else " "
+ 289        comments_sql = sep.join(
+ 290            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
+ 291        )
+ 292
+ 293        if not comments_sql:
+ 294            return sql
+ 295
+ 296        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
+ 297            return f"{comments_sql}{self.sep()}{sql}"
+ 298
+ 299        return f"{sql} {comments_sql}"
+ 300
+ 301    def wrap(self, expression: exp.Expression | str) -> str:
+ 302        this_sql = self.indent(
+ 303            self.sql(expression)
+ 304            if isinstance(expression, (exp.Select, exp.Union))
+ 305            else self.sql(expression, "this"),
+ 306            level=1,
+ 307            pad=0,
+ 308        )
+ 309        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
+ 310
+ 311    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
+ 312        original = self.identify
+ 313        self.identify = False
+ 314        result = func(*args, **kwargs)
+ 315        self.identify = original
+ 316        return result
+ 317
+ 318    def normalize_func(self, name: str) -> str:
+ 319        if self.normalize_functions == "upper":
+ 320            return name.upper()
+ 321        if self.normalize_functions == "lower":
+ 322            return name.lower()
+ 323        return name
+ 324
+ 325    def indent(
+ 326        self,
+ 327        sql: str,
+ 328        level: int = 0,
+ 329        pad: t.Optional[int] = None,
+ 330        skip_first: bool = False,
+ 331        skip_last: bool = False,
+ 332    ) -> str:
+ 333        if not self.pretty:
+ 334            return sql
+ 335
+ 336        pad = self.pad if pad is None else pad
+ 337        lines = sql.split("\n")
+ 338
+ 339        return "\n".join(
+ 340            line
+ 341            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
+ 342            else f"{' ' * (level * self._indent + pad)}{line}"
+ 343            for i, line in enumerate(lines)
+ 344        )
+ 345
    def sql(
        self,
        expression: t.Optional[str | exp.Expression],
        key: t.Optional[str] = None,
        comment: bool = True,
    ) -> str:
        """Render *expression* (or its arg *key*) as a SQL string.

        Dispatch order: dialect TRANSFORMS entry (callable or literal string),
        then a `<expression.key>_sql` handler method, then the generic
        Func/Property fallbacks.

        Raises:
            ValueError: for expression types with no handler, or non-Expression
                inputs.
        """
        if not expression:
            return ""

        # Plain strings are assumed to be already-rendered SQL.
        if isinstance(expression, str):
            return expression

        if key:
            return self.sql(expression.args.get(key))

        transform = self.TRANSFORMS.get(expression.__class__)

        if callable(transform):
            sql = transform(self, expression)
        elif transform:
            # A non-callable transform is a literal replacement string.
            sql = transform
        elif isinstance(expression, exp.Expression):
            exp_handler_name = f"{expression.key}_sql"

            if hasattr(self, exp_handler_name):
                sql = getattr(self, exp_handler_name)(expression)
            elif isinstance(expression, exp.Func):
                sql = self.function_fallback_sql(expression)
            elif isinstance(expression, exp.Property):
                sql = self.property_sql(expression)
            else:
                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
        else:
            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")

        return self.maybe_comment(sql, expression) if self._comments and comment else sql
+ 382
+ 383    def uncache_sql(self, expression: exp.Uncache) -> str:
+ 384        table = self.sql(expression, "this")
+ 385        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
+ 386        return f"UNCACHE TABLE{exists_sql} {table}"
+ 387
+ 388    def cache_sql(self, expression: exp.Cache) -> str:
+ 389        lazy = " LAZY" if expression.args.get("lazy") else ""
+ 390        table = self.sql(expression, "this")
+ 391        options = expression.args.get("options")
+ 392        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
+ 393        sql = self.sql(expression, "expression")
+ 394        sql = f" AS{self.sep()}{sql}" if sql else ""
+ 395        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
+ 396        return self.prepend_ctes(expression, sql)
+ 397
+ 398    def characterset_sql(self, expression: exp.CharacterSet) -> str:
+ 399        if isinstance(expression.parent, exp.Cast):
+ 400            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
+ 401        default = "DEFAULT " if expression.args.get("default") else ""
+ 402        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
+ 403
+ 404    def column_sql(self, expression: exp.Column) -> str:
+ 405        return ".".join(
+ 406            part
+ 407            for part in [
+ 408                self.sql(expression, "db"),
+ 409                self.sql(expression, "table"),
+ 410                self.sql(expression, "this"),
+ 411            ]
+ 412            if part
+ 413        )
+ 414
+ 415    def columndef_sql(self, expression: exp.ColumnDef) -> str:
+ 416        column = self.sql(expression, "this")
+ 417        kind = self.sql(expression, "kind")
+ 418        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
+ 419        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
+ 420        kind = f" {kind}" if kind else ""
+ 421        constraints = f" {constraints}" if constraints else ""
+ 422
+ 423        return f"{exists}{column}{kind}{constraints}"
+ 424
+ 425    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
+ 426        this = self.sql(expression, "this")
+ 427        kind_sql = self.sql(expression, "kind")
+ 428        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
+ 429
    def autoincrementcolumnconstraint_sql(self, _) -> str:
        # Rendered via token_sql, presumably so dialects can override the
        # keyword through TOKEN_MAPPING (e.g. AUTO_INCREMENT vs AUTOINCREMENT).
        return self.token_sql(TokenType.AUTO_INCREMENT)
+ 432
+ 433    def checkcolumnconstraint_sql(self, expression: exp.CheckColumnConstraint) -> str:
+ 434        this = self.sql(expression, "this")
+ 435        return f"CHECK ({this})"
+ 436
+ 437    def commentcolumnconstraint_sql(self, expression: exp.CommentColumnConstraint) -> str:
+ 438        comment = self.sql(expression, "this")
+ 439        return f"COMMENT {comment}"
+ 440
+ 441    def collatecolumnconstraint_sql(self, expression: exp.CollateColumnConstraint) -> str:
+ 442        collate = self.sql(expression, "this")
+ 443        return f"COLLATE {collate}"
+ 444
+ 445    def encodecolumnconstraint_sql(self, expression: exp.EncodeColumnConstraint) -> str:
+ 446        encode = self.sql(expression, "this")
+ 447        return f"ENCODE {encode}"
+ 448
+ 449    def defaultcolumnconstraint_sql(self, expression: exp.DefaultColumnConstraint) -> str:
+ 450        default = self.sql(expression, "this")
+ 451        return f"DEFAULT {default}"
+ 452
+ 453    def generatedasidentitycolumnconstraint_sql(
+ 454        self, expression: exp.GeneratedAsIdentityColumnConstraint
+ 455    ) -> str:
+ 456        this = ""
+ 457        if expression.this is not None:
+ 458            this = " ALWAYS " if expression.this else " BY DEFAULT "
+ 459        start = expression.args.get("start")
+ 460        start = f"START WITH {start}" if start else ""
+ 461        increment = expression.args.get("increment")
+ 462        increment = f"INCREMENT BY {increment}" if increment else ""
+ 463        sequence_opts = ""
+ 464        if start or increment:
+ 465            sequence_opts = f"{start} {increment}"
+ 466            sequence_opts = f" ({sequence_opts.strip()})"
+ 467        return f"GENERATED{this}AS IDENTITY{sequence_opts}"
+ 468
+ 469    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
+ 470        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
+ 471
+ 472    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
+ 473        desc = expression.args.get("desc")
+ 474        if desc is not None:
+ 475            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
+ 476        return f"PRIMARY KEY"
+ 477
    def uniquecolumnconstraint_sql(self, _) -> str:
        # UNIQUE takes no options here, so the expression argument is ignored.
        return "UNIQUE"
+ 480
    def create_sql(self, expression: exp.Create) -> str:
        """Render a CREATE statement (table, view, function, ...).

        Properties are grouped by render location (see locate_properties):
        PRE_SCHEMA properties move between the table name and its schema,
        POST_CREATE ones go right after CREATE's modifiers, POST_INDEX ones
        attach to the primary index, and POST_SCHEMA_ROOT/POST_SCHEMA_WITH are
        rendered through the expression's own `properties` arg.
        """
        kind = self.sql(expression, "kind").upper()
        properties = expression.args.get("properties")
        # Regroup properties on a copy so the input tree is not mutated.
        properties_exp = expression.copy()
        properties_locs = self.locate_properties(properties) if properties else {}
        if properties_locs.get(exp.Properties.Location.POST_SCHEMA_ROOT) or properties_locs.get(
            exp.Properties.Location.POST_SCHEMA_WITH
        ):
            properties_exp.set(
                "properties",
                exp.Properties(
                    expressions=[
                        *properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT],
                        *properties_locs[exp.Properties.Location.POST_SCHEMA_WITH],
                    ]
                ),
            )
        if kind == "TABLE" and properties_locs.get(exp.Properties.Location.PRE_SCHEMA):
            # PRE_SCHEMA properties are emitted between the table name and the
            # column schema: CREATE TABLE t, <props> (<schema>).
            this_name = self.sql(expression.this, "this")
            this_properties = self.properties(
                exp.Properties(expressions=properties_locs[exp.Properties.Location.PRE_SCHEMA]),
                wrapped=False,
            )
            this_schema = f"({self.expressions(expression.this)})"
            this = f"{this_name}, {this_properties} {this_schema}"
            properties_sql = ""
        else:
            this = self.sql(expression, "this")
            properties_sql = self.sql(properties_exp, "properties")
        begin = " BEGIN" if expression.args.get("begin") else ""
        expression_sql = self.sql(expression, "expression")
        if expression_sql:
            expression_sql = f"{begin}{self.sep()}{expression_sql}"

            # Some dialects define functions without AS.
            if self.CREATE_FUNCTION_AS or kind != "FUNCTION":
                expression_sql = f" AS{expression_sql}"

        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
        transient = (
            " TRANSIENT" if self.CREATE_TRANSIENT and expression.args.get("transient") else ""
        )
        external = " EXTERNAL" if expression.args.get("external") else ""
        replace = " OR REPLACE" if expression.args.get("replace") else ""
        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
        unique = " UNIQUE" if expression.args.get("unique") else ""
        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
        set_ = " SET" if expression.args.get("set") else ""
        multiset = " MULTISET" if expression.args.get("multiset") else ""
        global_temporary = " GLOBAL TEMPORARY" if expression.args.get("global_temporary") else ""
        volatile = " VOLATILE" if expression.args.get("volatile") else ""
        # Tri-state: None -> omitted, True -> WITH DATA, False -> WITH NO DATA.
        data = expression.args.get("data")
        if data is None:
            data = ""
        elif data:
            data = " WITH DATA"
        else:
            data = " WITH NO DATA"
        # Tri-state, same convention as `data` above.
        statistics = expression.args.get("statistics")
        if statistics is None:
            statistics = ""
        elif statistics:
            statistics = " AND STATISTICS"
        else:
            statistics = " AND NO STATISTICS"
        no_primary_index = " NO PRIMARY INDEX" if expression.args.get("no_primary_index") else ""

        indexes = expression.args.get("indexes")
        index_sql = ""
        if indexes:
            indexes_sql = []
            for index in indexes:
                ind_unique = " UNIQUE" if index.args.get("unique") else ""
                ind_primary = " PRIMARY" if index.args.get("primary") else ""
                ind_amp = " AMP" if index.args.get("amp") else ""
                ind_name = f" {index.name}" if index.name else ""
                ind_columns = (
                    f' ({self.expressions(index, key="columns", flat=True)})'
                    if index.args.get("columns")
                    else ""
                )
                # POST_INDEX properties attach after the primary index columns.
                if index.args.get("primary") and properties_locs.get(
                    exp.Properties.Location.POST_INDEX
                ):
                    postindex_props_sql = self.properties(
                        exp.Properties(
                            expressions=properties_locs[exp.Properties.Location.POST_INDEX]
                        ),
                        wrapped=False,
                    )
                    ind_columns = f"{ind_columns} {postindex_props_sql}"

                indexes_sql.append(
                    f"{ind_unique}{ind_primary}{ind_amp} INDEX{ind_name}{ind_columns}"
                )
            index_sql = "".join(indexes_sql)

        postcreate_props_sql = ""
        if properties_locs.get(exp.Properties.Location.POST_CREATE):
            postcreate_props_sql = self.properties(
                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
                sep=" ",
                prefix=" ",
                wrapped=False,
            )

        modifiers = "".join(
            (
                replace,
                temporary,
                transient,
                external,
                unique,
                materialized,
                set_,
                multiset,
                global_temporary,
                volatile,
                postcreate_props_sql,
            )
        )
        no_schema_binding = (
            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
        )

        post_expression_modifiers = "".join((data, statistics, no_primary_index))

        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{post_expression_modifiers}{index_sql}{no_schema_binding}"
        return self.prepend_ctes(expression, expression_sql)
+ 609
+ 610    def describe_sql(self, expression: exp.Describe) -> str:
+ 611        return f"DESCRIBE {self.sql(expression, 'this')}"
+ 612
+ 613    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
+ 614        with_ = self.sql(expression, "with")
+ 615        if with_:
+ 616            sql = f"{with_}{self.sep()}{sql}"
+ 617        return sql
+ 618
+ 619    def with_sql(self, expression: exp.With) -> str:
+ 620        sql = self.expressions(expression, flat=True)
+ 621        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
+ 622
+ 623        return f"WITH {recursive}{sql}"
+ 624
+ 625    def cte_sql(self, expression: exp.CTE) -> str:
+ 626        alias = self.sql(expression, "alias")
+ 627        return f"{alias} AS {self.wrap(expression)}"
+ 628
+ 629    def tablealias_sql(self, expression: exp.TableAlias) -> str:
+ 630        alias = self.sql(expression, "this")
+ 631        columns = self.expressions(expression, key="columns", flat=True)
+ 632        columns = f"({columns})" if columns else ""
+ 633        return f"{alias}{columns}"
+ 634
    def bitstring_sql(self, expression: exp.BitString) -> str:
        # Bit-string literals pass through unchanged by default.
        return self.sql(expression, "this")
+ 637
    def hexstring_sql(self, expression: exp.HexString) -> str:
        # Hex-string literals pass through unchanged by default.
        return self.sql(expression, "this")
+ 640
    def datatype_sql(self, expression: exp.DataType) -> str:
        """Render a data type, including nested and parameterized forms."""
        type_value = expression.this
        # Use the dialect-specific name if mapped, else the enum's own value.
        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
        nested = ""
        interior = self.expressions(expression, flat=True)
        values = ""
        if interior:
            if expression.args.get("nested"):
                # Nested types use STRUCT_DELIMITER, e.g. STRUCT<a INT>.
                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
                if expression.args.get("values") is not None:
                    # Literal values following the type: ARRAY[...] or (...).
                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
                    values = (
                        f"{delimiters[0]}{self.expressions(expression, 'values')}{delimiters[1]}"
                    )
            else:
                # Non-nested parameters, e.g. VARCHAR(100).
                nested = f"({interior})"

        return f"{type_sql}{nested}{values}"
+ 659
+ 660    def directory_sql(self, expression: exp.Directory) -> str:
+ 661        local = "LOCAL " if expression.args.get("local") else ""
+ 662        row_format = self.sql(expression, "row_format")
+ 663        row_format = f" {row_format}" if row_format else ""
+ 664        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
+ 665
+ 666    def delete_sql(self, expression: exp.Delete) -> str:
+ 667        this = self.sql(expression, "this")
+ 668        this = f" FROM {this}" if this else ""
+ 669        using_sql = (
+ 670            f" USING {self.expressions(expression, 'using', sep=', USING ')}"
+ 671            if expression.args.get("using")
+ 672            else ""
+ 673        )
+ 674        where_sql = self.sql(expression, "where")
+ 675        sql = f"DELETE{this}{using_sql}{where_sql}"
+ 676        return self.prepend_ctes(expression, sql)
+ 677
+ 678    def drop_sql(self, expression: exp.Drop) -> str:
+ 679        this = self.sql(expression, "this")
+ 680        kind = expression.args["kind"]
+ 681        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
+ 682        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+ 683        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
+ 684        cascade = " CASCADE" if expression.args.get("cascade") else ""
+ 685        return f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}"
+ 686
+ 687    def except_sql(self, expression: exp.Except) -> str:
+ 688        return self.prepend_ctes(
+ 689            expression,
+ 690            self.set_operation(expression, self.except_op(expression)),
+ 691        )
+ 692
+ 693    def except_op(self, expression: exp.Except) -> str:
+ 694        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
+ 695
+ 696    def fetch_sql(self, expression: exp.Fetch) -> str:
+ 697        direction = expression.args.get("direction")
+ 698        direction = f" {direction.upper()}" if direction else ""
+ 699        count = expression.args.get("count")
+ 700        count = f" {count}" if count else ""
+ 701        return f"{self.seg('FETCH')}{direction}{count} ROWS ONLY"
+ 702
+ 703    def filter_sql(self, expression: exp.Filter) -> str:
+ 704        this = self.sql(expression, "this")
+ 705        where = self.sql(expression, "expression")[1:]  # where has a leading space
+ 706        return f"{this} FILTER({where})"
+ 707
    def hint_sql(self, expression: exp.Hint) -> str:
        # Hints are not supported by this generator: a non-empty hint is
        # reported via unsupported() and dropped from the output.
        if self.sql(expression, "this"):
            self.unsupported("Hints are not supported")
        return ""
+ 712
+ 713    def index_sql(self, expression: exp.Index) -> str:
+ 714        this = self.sql(expression, "this")
+ 715        table = self.sql(expression, "table")
+ 716        columns = self.sql(expression, "columns")
+ 717        return f"{this} ON {table} {columns}"
+ 718
+ 719    def identifier_sql(self, expression: exp.Identifier) -> str:
+ 720        text = expression.name
+ 721        text = text.lower() if self.normalize else text
+ 722        text = text.replace(self.identifier_end, self._escaped_identifier_end)
+ 723        if expression.args.get("quoted") or self.identify:
+ 724            text = f"{self.identifier_start}{text}{self.identifier_end}"
+ 725        return text
+ 726
+ 727    def national_sql(self, expression: exp.National) -> str:
+ 728        return f"N{self.sql(expression, 'this')}"
+ 729
    def partition_sql(self, expression: exp.Partition) -> str:
        # Render a PARTITION(...) clause from the expression's children.
        return f"PARTITION({self.expressions(expression)})"
+ 732
+ 733    def properties_sql(self, expression: exp.Properties) -> str:
+ 734        root_properties = []
+ 735        with_properties = []
+ 736
+ 737        for p in expression.expressions:
+ 738            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+ 739            if p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+ 740                with_properties.append(p)
+ 741            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+ 742                root_properties.append(p)
+ 743
+ 744        return self.root_properties(
+ 745            exp.Properties(expressions=root_properties)
+ 746        ) + self.with_properties(exp.Properties(expressions=with_properties))
+ 747
+ 748    def root_properties(self, properties: exp.Properties) -> str:
+ 749        if properties.expressions:
+ 750            return self.sep() + self.expressions(properties, indent=False, sep=" ")
+ 751        return ""
+ 752
+ 753    def properties(
+ 754        self,
+ 755        properties: exp.Properties,
+ 756        prefix: str = "",
+ 757        sep: str = ", ",
+ 758        suffix: str = "",
+ 759        wrapped: bool = True,
+ 760    ) -> str:
+ 761        if properties.expressions:
+ 762            expressions = self.expressions(properties, sep=sep, indent=False)
+ 763            expressions = self.wrap(expressions) if wrapped else expressions
+ 764            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
+ 765        return ""
+ 766
    def with_properties(self, properties: exp.Properties) -> str:
        # Render properties as a WITH (...) clause.
        return self.properties(properties, prefix=self.seg("WITH"))
+ 769
+ 770    def locate_properties(
+ 771        self, properties: exp.Properties
+ 772    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
+ 773        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
+ 774            key: [] for key in exp.Properties.Location
+ 775        }
+ 776
+ 777        for p in properties.expressions:
+ 778            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+ 779            if p_loc == exp.Properties.Location.PRE_SCHEMA:
+ 780                properties_locs[exp.Properties.Location.PRE_SCHEMA].append(p)
+ 781            elif p_loc == exp.Properties.Location.POST_INDEX:
+ 782                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
+ 783            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+ 784                properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT].append(p)
+ 785            elif p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+ 786                properties_locs[exp.Properties.Location.POST_SCHEMA_WITH].append(p)
+ 787            elif p_loc == exp.Properties.Location.POST_CREATE:
+ 788                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
+ 789            elif p_loc == exp.Properties.Location.UNSUPPORTED:
+ 790                self.unsupported(f"Unsupported property {p.key}")
+ 791
+ 792        return properties_locs
+ 793
    def property_sql(self, expression: exp.Property) -> str:
        """Render a single property as NAME=value."""
        property_cls = expression.__class__
        if property_cls == exp.Property:
            # Generic properties carry their own name.
            return f"{expression.name}={self.sql(expression, 'value')}"

        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
        if not property_name:
            self.unsupported(f"Unsupported property {expression.key}")
            # NOTE(review): property_name is None here, so the return below
            # emits the literal "None=" into the SQL — looks unintended;
            # confirm whether a fallback name should be used instead.

        return f"{property_name}={self.sql(expression, 'this')}"
+ 804
+ 805    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
+ 806        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
+ 807        options = f" {options}" if options else ""
+ 808        return f"LIKE {self.sql(expression, 'this')}{options}"
+ 809
+ 810    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
+ 811        no = "NO " if expression.args.get("no") else ""
+ 812        protection = " PROTECTION" if expression.args.get("protection") else ""
+ 813        return f"{no}FALLBACK{protection}"
+ 814
+ 815    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
+ 816        no = "NO " if expression.args.get("no") else ""
+ 817        dual = "DUAL " if expression.args.get("dual") else ""
+ 818        before = "BEFORE " if expression.args.get("before") else ""
+ 819        return f"{no}{dual}{before}JOURNAL"
+ 820
+ 821    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
+ 822        freespace = self.sql(expression, "this")
+ 823        percent = " PERCENT" if expression.args.get("percent") else ""
+ 824        return f"FREESPACE={freespace}{percent}"
+ 825
+ 826    def afterjournalproperty_sql(self, expression: exp.AfterJournalProperty) -> str:
+ 827        no = "NO " if expression.args.get("no") else ""
+ 828        dual = "DUAL " if expression.args.get("dual") else ""
+ 829        local = ""
+ 830        if expression.args.get("local") is not None:
+ 831            local = "LOCAL " if expression.args.get("local") else "NOT LOCAL "
+ 832        return f"{no}{dual}{local}AFTER JOURNAL"
+ 833
+ 834    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
+ 835        if expression.args.get("default"):
+ 836            property = "DEFAULT"
+ 837        elif expression.args.get("on"):
+ 838            property = "ON"
+ 839        else:
+ 840            property = "OFF"
+ 841        return f"CHECKSUM={property}"
+ 842
+ 843    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
+ 844        if expression.args.get("no"):
+ 845            return "NO MERGEBLOCKRATIO"
+ 846        if expression.args.get("default"):
+ 847            return "DEFAULT MERGEBLOCKRATIO"
+ 848
+ 849        percent = " PERCENT" if expression.args.get("percent") else ""
+ 850        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
+ 851
+ 852    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
+ 853        default = expression.args.get("default")
+ 854        min = expression.args.get("min")
+ 855        if default is not None or min is not None:
+ 856            if default:
+ 857                property = "DEFAULT"
+ 858            elif min:
+ 859                property = "MINIMUM"
+ 860            else:
+ 861                property = "MAXIMUM"
+ 862            return f"{property} DATABLOCKSIZE"
+ 863        else:
+ 864            units = expression.args.get("units")
+ 865            units = f" {units}" if units else ""
+ 866            return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
+ 867
+ 868    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
+ 869        autotemp = expression.args.get("autotemp")
+ 870        always = expression.args.get("always")
+ 871        default = expression.args.get("default")
+ 872        manual = expression.args.get("manual")
+ 873        never = expression.args.get("never")
+ 874
+ 875        if autotemp is not None:
+ 876            property = f"AUTOTEMP({self.expressions(autotemp)})"
+ 877        elif always:
+ 878            property = "ALWAYS"
+ 879        elif default:
+ 880            property = "DEFAULT"
+ 881        elif manual:
+ 882            property = "MANUAL"
+ 883        elif never:
+ 884            property = "NEVER"
+ 885        return f"BLOCKCOMPRESSION={property}"
+ 886
+ 887    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
+ 888        no = expression.args.get("no")
+ 889        no = " NO" if no else ""
+ 890        concurrent = expression.args.get("concurrent")
+ 891        concurrent = " CONCURRENT" if concurrent else ""
+ 892
+ 893        for_ = ""
+ 894        if expression.args.get("for_all"):
+ 895            for_ = " FOR ALL"
+ 896        elif expression.args.get("for_insert"):
+ 897            for_ = " FOR INSERT"
+ 898        elif expression.args.get("for_none"):
+ 899            for_ = " FOR NONE"
+ 900        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
+ 901
    def insert_sql(self, expression: exp.Insert) -> str:
        """Render an INSERT statement, including the Hive OVERWRITE forms."""
        overwrite = expression.args.get("overwrite")

        if isinstance(expression.this, exp.Directory):
            # INSERT OVERWRITE DIRECTORY '...' takes no TABLE keyword
            this = "OVERWRITE " if overwrite else "INTO "
        else:
            this = "OVERWRITE TABLE " if overwrite else "INTO "

        this = f"{this}{self.sql(expression, 'this')}"
        # note the deliberate surrounding spaces in both branches
        exists = " IF EXISTS " if expression.args.get("exists") else " "
        partition_sql = (
            self.sql(expression, "partition") if expression.args.get("partition") else ""
        )
        expression_sql = self.sql(expression, "expression")
        # only insert a line break after an explicit PARTITION clause
        sep = self.sep() if partition_sql else ""
        sql = f"INSERT {this}{exists}{partition_sql}{sep}{expression_sql}"
        return self.prepend_ctes(expression, sql)
+ 919
+ 920    def intersect_sql(self, expression: exp.Intersect) -> str:
+ 921        return self.prepend_ctes(
+ 922            expression,
+ 923            self.set_operation(expression, self.intersect_op(expression)),
+ 924        )
+ 925
+ 926    def intersect_op(self, expression: exp.Intersect) -> str:
+ 927        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
+ 928
+ 929    def introducer_sql(self, expression: exp.Introducer) -> str:
+ 930        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+ 931
+ 932    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
+ 933        return expression.name.upper()
+ 934
+ 935    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
+ 936        fields = expression.args.get("fields")
+ 937        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
+ 938        escaped = expression.args.get("escaped")
+ 939        escaped = f" ESCAPED BY {escaped}" if escaped else ""
+ 940        items = expression.args.get("collection_items")
+ 941        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
+ 942        keys = expression.args.get("map_keys")
+ 943        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
+ 944        lines = expression.args.get("lines")
+ 945        lines = f" LINES TERMINATED BY {lines}" if lines else ""
+ 946        null = expression.args.get("null")
+ 947        null = f" NULL DEFINED AS {null}" if null else ""
+ 948        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
+ 949
    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
        """Render a qualified table reference plus alias, hints, laterals,
        joins and pivots."""
        table = ".".join(
            part
            for part in [
                self.sql(expression, "catalog"),
                self.sql(expression, "db"),
                self.sql(expression, "this"),
            ]
            if part  # skip absent qualifiers so no stray dots are emitted
        )

        alias = self.sql(expression, "alias")
        alias = f"{sep}{alias}" if alias else ""
        hints = self.expressions(expression, key="hints", sep=", ", flat=True)
        hints = f" WITH ({hints})" if hints else ""
        laterals = self.expressions(expression, key="laterals", sep="")
        joins = self.expressions(expression, key="joins", sep="")
        pivots = self.expressions(expression, key="pivots", sep="")
        system_time = expression.args.get("system_time")
        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""

        if alias and pivots:
            # the alias names the pivoted relation, so it must follow the pivot
            pivots = f"{pivots}{alias}"
            alias = ""

        return f"{table}{system_time}{alias}{hints}{laterals}{joins}{pivots}"
+ 976
    def tablesample_sql(self, expression: exp.TableSample) -> str:
        """Render TABLESAMPLE in its BUCKET, PERCENT, ROWS or plain-size forms."""
        if self.alias_post_tablesample and expression.this.alias:
            # dialect places the alias after the sample: tbl TABLESAMPLE (...) AS a
            this = self.sql(expression.this, "this")
            alias = f" AS {self.sql(expression.this, 'alias')}"
        else:
            this = self.sql(expression, "this")
            alias = ""
        method = self.sql(expression, "method")
        method = f" {method.upper()} " if method else ""
        numerator = self.sql(expression, "bucket_numerator")
        denominator = self.sql(expression, "bucket_denominator")
        field = self.sql(expression, "bucket_field")
        field = f" ON {field}" if field else ""
        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
        percent = self.sql(expression, "percent")
        percent = f"{percent} PERCENT" if percent else ""
        rows = self.sql(expression, "rows")
        rows = f"{rows} ROWS" if rows else ""
        size = self.sql(expression, "size")
        seed = self.sql(expression, "seed")
        seed = f" SEED ({seed})" if seed else ""
        return f"{this} TABLESAMPLE{method}({bucket}{percent}{rows}{size}){seed}{alias}"
+ 999
+1000    def pivot_sql(self, expression: exp.Pivot) -> str:
+1001        this = self.sql(expression, "this")
+1002        unpivot = expression.args.get("unpivot")
+1003        direction = "UNPIVOT" if unpivot else "PIVOT"
+1004        expressions = self.expressions(expression, key="expressions")
+1005        field = self.sql(expression, "field")
+1006        return f"{this} {direction}({expressions} FOR {field})"
+1007
+1008    def tuple_sql(self, expression: exp.Tuple) -> str:
+1009        return f"({self.expressions(expression, flat=True)})"
+1010
+1011    def update_sql(self, expression: exp.Update) -> str:
+1012        this = self.sql(expression, "this")
+1013        set_sql = self.expressions(expression, flat=True)
+1014        from_sql = self.sql(expression, "from")
+1015        where_sql = self.sql(expression, "where")
+1016        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}"
+1017        return self.prepend_ctes(expression, sql)
+1018
    def values_sql(self, expression: exp.Values) -> str:
        """Render a VALUES clause, wrapping it when it acts as a derived table."""
        args = self.expressions(expression)
        alias = self.sql(expression, "alias")
        values = f"VALUES{self.seg('')}{args}"
        values = (
            f"({values})"
            # some dialects require parens when VALUES is used as a relation
            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
            else values
        )
        return f"{values} AS {alias}" if alias else values
+1029
+1030    def var_sql(self, expression: exp.Var) -> str:
+1031        return self.sql(expression, "this")
+1032
+1033    def into_sql(self, expression: exp.Into) -> str:
+1034        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+1035        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
+1036        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
+1037
+1038    def from_sql(self, expression: exp.From) -> str:
+1039        expressions = self.expressions(expression, flat=True)
+1040        return f"{self.seg('FROM')} {expressions}"
+1041
    def group_sql(self, expression: exp.Group) -> str:
        """Render GROUP BY with optional GROUPING SETS / CUBE / ROLLUP."""
        group_by = self.op_expressions("GROUP BY", expression)
        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
        grouping_sets = (
            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
        )

        cube = expression.args.get("cube")
        if cube is True:
            # bare-flag form: GROUP BY ... WITH CUBE
            cube = self.seg("WITH CUBE")
        else:
            # list form: CUBE (a, b, ...)
            cube = self.expressions(expression, key="cube", indent=False)
            cube = f"{self.seg('CUBE')} {self.wrap(cube)}" if cube else ""

        rollup = expression.args.get("rollup")
        if rollup is True:
            # bare-flag form: GROUP BY ... WITH ROLLUP
            rollup = self.seg("WITH ROLLUP")
        else:
            rollup = self.expressions(expression, key="rollup", indent=False)
            rollup = f"{self.seg('ROLLUP')} {self.wrap(rollup)}" if rollup else ""

        return f"{group_by}{csv(grouping_sets, cube, rollup, sep=',')}"
+1064
+1065    def having_sql(self, expression: exp.Having) -> str:
+1066        this = self.indent(self.sql(expression, "this"))
+1067        return f"{self.seg('HAVING')}{self.sep()}{this}"
+1068
    def join_sql(self, expression: exp.Join) -> str:
        """Render a JOIN operator, the joined relation and its ON/USING clause."""
        op_sql = self.seg(
            " ".join(
                op
                for op in (
                    "NATURAL" if expression.args.get("natural") else None,
                    expression.side,
                    expression.kind,
                    "JOIN",
                )
                if op  # drop pieces that don't apply to this join
            )
        )
        on_sql = self.sql(expression, "on")
        using = expression.args.get("using")

        if not on_sql and using:
            on_sql = csv(*(self.sql(column) for column in using))

        if on_sql:
            on_sql = self.indent(on_sql, skip_first=True)
            space = self.seg(" " * self.pad) if self.pretty else " "
            if using:
                on_sql = f"{space}USING ({on_sql})"
            else:
                on_sql = f"{space}ON {on_sql}"

        expression_sql = self.sql(expression, "expression")
        this_sql = self.sql(expression, "this")
        # the optional 'expression' arg renders ahead of the join operator
        return f"{expression_sql}{op_sql} {this_sql}{on_sql}"
+1099
+1100    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
+1101        args = self.expressions(expression, flat=True)
+1102        args = f"({args})" if len(args.split(",")) > 1 else args
+1103        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
+1104
    def lateral_sql(self, expression: exp.Lateral) -> str:
        """Render LATERAL: subquery, Hive LATERAL VIEW, or aliased forms."""
        this = self.sql(expression, "this")

        if isinstance(expression.this, exp.Subquery):
            return f"LATERAL {this}"

        if expression.args.get("view"):
            # Hive-style: LATERAL VIEW [OUTER] fn(...) tbl [AS col1, col2]
            alias = expression.args["alias"]
            columns = self.expressions(alias, key="columns", flat=True)
            table = f" {alias.name}" if alias.name else ""
            columns = f" AS {columns}" if columns else ""
            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
            return f"{op_sql}{self.sep()}{this}{table}{columns}"

        alias = self.sql(expression, "alias")
        alias = f" AS {alias}" if alias else ""
        return f"LATERAL {this}{alias}"
+1122
+1123    def limit_sql(self, expression: exp.Limit) -> str:
+1124        this = self.sql(expression, "this")
+1125        return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}"
+1126
+1127    def offset_sql(self, expression: exp.Offset) -> str:
+1128        this = self.sql(expression, "this")
+1129        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
+1130
+1131    def lock_sql(self, expression: exp.Lock) -> str:
+1132        if self.LOCKING_READS_SUPPORTED:
+1133            lock_type = "UPDATE" if expression.args["update"] else "SHARE"
+1134            return self.seg(f"FOR {lock_type}")
+1135
+1136        self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
+1137        return ""
+1138
    def literal_sql(self, expression: exp.Literal) -> str:
        """Render a literal; strings are quoted/escaped for the target dialect."""
        text = expression.this or ""
        if expression.is_string:
            if self._replace_backslash:
                # dialects that treat backslash as an escape need it doubled
                text = BACKSLASH_RE.sub(r"\\\\", text)
            text = text.replace(self.quote_end, self._escaped_quote_end)
            if self.pretty:
                # shield embedded newlines from pretty-print re-indentation
                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
            text = f"{self.quote_start}{text}{self.quote_end}"
        return text
+1149
+1150    def loaddata_sql(self, expression: exp.LoadData) -> str:
+1151        local = " LOCAL" if expression.args.get("local") else ""
+1152        inpath = f" INPATH {self.sql(expression, 'inpath')}"
+1153        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
+1154        this = f" INTO TABLE {self.sql(expression, 'this')}"
+1155        partition = self.sql(expression, "partition")
+1156        partition = f" {partition}" if partition else ""
+1157        input_format = self.sql(expression, "input_format")
+1158        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
+1159        serde = self.sql(expression, "serde")
+1160        serde = f" SERDE {serde}" if serde else ""
+1161        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
+1162
+1163    def null_sql(self, *_) -> str:
+1164        return "NULL"
+1165
+1166    def boolean_sql(self, expression: exp.Boolean) -> str:
+1167        return "TRUE" if expression.this else "FALSE"
+1168
    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
        """Render ORDER BY; ``this`` is set when ordering is attached inline
        (e.g. inside a window spec), in which case it is rendered flat."""
        this = self.sql(expression, "this")
        this = f"{this} " if this else this
        # flat=this or flat: an inline ORDER BY always renders on one line
        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
+1173
+1174    def cluster_sql(self, expression: exp.Cluster) -> str:
+1175        return self.op_expressions("CLUSTER BY", expression)
+1176
+1177    def distribute_sql(self, expression: exp.Distribute) -> str:
+1178        return self.op_expressions("DISTRIBUTE BY", expression)
+1179
+1180    def sort_sql(self, expression: exp.Sort) -> str:
+1181        return self.op_expressions("SORT BY", expression)
+1182
    def ordered_sql(self, expression: exp.Ordered) -> str:
        """Render one ORDER BY key, adding NULLS FIRST/LAST only when the
        target dialect's default null placement differs from the request."""
        desc = expression.args.get("desc")
        asc = not desc

        nulls_first = expression.args.get("nulls_first")
        nulls_last = not nulls_first
        # the dialect's default null placement, set on the generator
        nulls_are_large = self.null_ordering == "nulls_are_large"
        nulls_are_small = self.null_ordering == "nulls_are_small"
        nulls_are_last = self.null_ordering == "nulls_are_last"

        sort_order = " DESC" if desc else ""
        nulls_sort_change = ""
        # emit the modifier only when the default would put nulls elsewhere
        if nulls_first and (
            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
        ):
            nulls_sort_change = " NULLS FIRST"
        elif (
            nulls_last
            and ((asc and nulls_are_small) or (desc and nulls_are_large))
            and not nulls_are_last
        ):
            nulls_sort_change = " NULLS LAST"

        # drop the modifier (with a warning) if the dialect can't express it
        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
            self.unsupported(
                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
            )
            nulls_sort_change = ""

        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
+1213
+1214    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
+1215        partition = self.partition_by_sql(expression)
+1216        order = self.sql(expression, "order")
+1217        measures = self.sql(expression, "measures")
+1218        measures = self.seg(f"MEASURES {measures}") if measures else ""
+1219        rows = self.sql(expression, "rows")
+1220        rows = self.seg(rows) if rows else ""
+1221        after = self.sql(expression, "after")
+1222        after = self.seg(after) if after else ""
+1223        pattern = self.sql(expression, "pattern")
+1224        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
+1225        define = self.sql(expression, "define")
+1226        define = self.seg(f"DEFINE {define}") if define else ""
+1227        body = "".join(
+1228            (
+1229                partition,
+1230                order,
+1231                measures,
+1232                rows,
+1233                after,
+1234                pattern,
+1235                define,
+1236            )
+1237        )
+1238        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}"
+1239
    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
        """Append the trailing query clauses to *sqls*.

        The argument order below is the render order (joins, match, laterals,
        WHERE, GROUP BY, ..., LIMIT, OFFSET, lock) — do not reorder casually.
        """
        return csv(
            *sqls,
            *[self.sql(sql) for sql in expression.args.get("joins") or []],
            self.sql(expression, "match"),
            *[self.sql(sql) for sql in expression.args.get("laterals") or []],
            self.sql(expression, "where"),
            self.sql(expression, "group"),
            self.sql(expression, "having"),
            self.sql(expression, "qualify"),
            self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
            if expression.args.get("windows")
            else "",
            self.sql(expression, "distribute"),
            self.sql(expression, "sort"),
            self.sql(expression, "cluster"),
            self.sql(expression, "order"),
            self.sql(expression, "limit"),
            self.sql(expression, "offset"),
            self.sql(expression, "lock"),
            sep="",
        )
+1262
    def select_sql(self, expression: exp.Select) -> str:
        """Render a full SELECT statement, including CTEs and all modifiers."""
        hint = self.sql(expression, "hint")
        distinct = self.sql(expression, "distinct")
        distinct = f" {distinct}" if distinct else ""
        expressions = self.expressions(expression)
        expressions = f"{self.sep()}{expressions}" if expressions else expressions
        sql = self.query_modifiers(
            expression,
            f"SELECT{hint}{distinct}{expressions}",
            # NOTE(review): comment=False presumably suppresses duplicate
            # attached comments for these clauses — confirm against self.sql
            self.sql(expression, "into", comment=False),
            self.sql(expression, "from", comment=False),
        )
        return self.prepend_ctes(expression, sql)
+1276
+1277    def schema_sql(self, expression: exp.Schema) -> str:
+1278        this = self.sql(expression, "this")
+1279        this = f"{this} " if this else ""
+1280        sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
+1281        return f"{this}{sql}"
+1282
+1283    def star_sql(self, expression: exp.Star) -> str:
+1284        except_ = self.expressions(expression, key="except", flat=True)
+1285        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
+1286        replace = self.expressions(expression, key="replace", flat=True)
+1287        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
+1288        return f"*{except_}{replace}"
+1289
+1290    def structkwarg_sql(self, expression: exp.StructKwarg) -> str:
+1291        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+1292
+1293    def parameter_sql(self, expression: exp.Parameter) -> str:
+1294        return f"@{self.sql(expression, 'this')}"
+1295
+1296    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
+1297        this = self.sql(expression, "this")
+1298        kind = expression.text("kind")
+1299        if kind:
+1300            kind = f"{kind}."
+1301        return f"@@{kind}{this}"
+1302
+1303    def placeholder_sql(self, expression: exp.Placeholder) -> str:
+1304        return f":{expression.name}" if expression.name else "?"
+1305
+1306    def subquery_sql(self, expression: exp.Subquery) -> str:
+1307        alias = self.sql(expression, "alias")
+1308
+1309        sql = self.query_modifiers(
+1310            expression,
+1311            self.wrap(expression),
+1312            self.expressions(expression, key="pivots", sep=" "),
+1313            f" AS {alias}" if alias else "",
+1314        )
+1315
+1316        return self.prepend_ctes(expression, sql)
+1317
+1318    def qualify_sql(self, expression: exp.Qualify) -> str:
+1319        this = self.indent(self.sql(expression, "this"))
+1320        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
+1321
+1322    def union_sql(self, expression: exp.Union) -> str:
+1323        return self.prepend_ctes(
+1324            expression,
+1325            self.set_operation(expression, self.union_op(expression)),
+1326        )
+1327
+1328    def union_op(self, expression: exp.Union) -> str:
+1329        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
+1330        kind = kind if expression.args.get("distinct") else " ALL"
+1331        return f"UNION{kind}"
+1332
    def unnest_sql(self, expression: exp.Unnest) -> str:
        """Render UNNEST(...) with optional ordinality, alias and offset."""
        args = self.expressions(expression, flat=True)
        alias = expression.args.get("alias")
        if alias and self.unnest_column_only:
            # dialect aliases the produced column, not the derived table
            columns = alias.columns
            alias = self.sql(columns[0]) if columns else ""
        else:
            alias = self.sql(expression, "alias")
        alias = f" AS {alias}" if alias else alias
        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
        offset = expression.args.get("offset")
        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
        return f"UNNEST({args}){ordinality}{alias}{offset}"
+1346
+1347    def where_sql(self, expression: exp.Where) -> str:
+1348        this = self.indent(self.sql(expression, "this"))
+1349        return f"{self.seg('WHERE')}{self.sep()}{this}"
+1350
    def window_sql(self, expression: exp.Window) -> str:
        """Render a window: ``fn OVER (...)`` or a named ``w AS (...)`` entry."""
        this = self.sql(expression, "this")

        partition = self.partition_by_sql(expression)

        order = expression.args.get("order")
        order_sql = self.order_sql(order, flat=True) if order else ""

        partition_sql = partition + " " if partition and order else partition

        spec = expression.args.get("spec")
        spec_sql = " " + self.window_spec_sql(spec) if spec else ""

        alias = self.sql(expression, "alias")
        # inside a WINDOW clause the keyword is AS, otherwise OVER
        this = f"{this} {'AS' if expression.arg_key == 'windows' else 'OVER'}"

        if not partition and not order and not spec and alias:
            # bare reference to a named window: fn OVER w
            return f"{this} {alias}"

        window_args = alias + partition_sql + order_sql + spec_sql

        return f"{this} ({window_args.strip()})"
+1373
+1374    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
+1375        partition = self.expressions(expression, key="partition_by", flat=True)
+1376        return f"PARTITION BY {partition}" if partition else ""
+1377
+1378    def window_spec_sql(self, expression: exp.WindowSpec) -> str:
+1379        kind = self.sql(expression, "kind")
+1380        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
+1381        end = (
+1382            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
+1383            or "CURRENT ROW"
+1384        )
+1385        return f"{kind} BETWEEN {start} AND {end}"
+1386
    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
        """Render ``fn WITHIN GROUP (ORDER BY ...)``."""
        this = self.sql(expression, "this")
        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
        return f"{this} WITHIN GROUP ({expression_sql})"
+1391
+1392    def between_sql(self, expression: exp.Between) -> str:
+1393        this = self.sql(expression, "this")
+1394        low = self.sql(expression, "low")
+1395        high = self.sql(expression, "high")
+1396        return f"{this} BETWEEN {low} AND {high}"
+1397
+1398    def bracket_sql(self, expression: exp.Bracket) -> str:
+1399        expressions = apply_index_offset(expression.expressions, self.index_offset)
+1400        expressions_sql = ", ".join(self.sql(e) for e in expressions)
+1401
+1402        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
+1403
+1404    def all_sql(self, expression: exp.All) -> str:
+1405        return f"ALL {self.wrap(expression)}"
+1406
+1407    def any_sql(self, expression: exp.Any) -> str:
+1408        return f"ANY {self.wrap(expression)}"
+1409
+1410    def exists_sql(self, expression: exp.Exists) -> str:
+1411        return f"EXISTS{self.wrap(expression)}"
+1412
+1413    def case_sql(self, expression: exp.Case) -> str:
+1414        this = self.sql(expression, "this")
+1415        statements = [f"CASE {this}" if this else "CASE"]
+1416
+1417        for e in expression.args["ifs"]:
+1418            statements.append(f"WHEN {self.sql(e, 'this')}")
+1419            statements.append(f"THEN {self.sql(e, 'true')}")
+1420
+1421        default = self.sql(expression, "default")
+1422
+1423        if default:
+1424            statements.append(f"ELSE {default}")
+1425
+1426        statements.append("END")
+1427
+1428        if self.pretty and self.text_width(statements) > self._max_text_width:
+1429            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
+1430
+1431        return " ".join(statements)
+1432
+1433    def constraint_sql(self, expression: exp.Constraint) -> str:
+1434        this = self.sql(expression, "this")
+1435        expressions = self.expressions(expression, flat=True)
+1436        return f"CONSTRAINT {this} {expressions}"
+1437
+1438    def extract_sql(self, expression: exp.Extract) -> str:
+1439        this = self.sql(expression, "this")
+1440        expression_sql = self.sql(expression, "expression")
+1441        return f"EXTRACT({this} FROM {expression_sql})"
+1442
+1443    def trim_sql(self, expression: exp.Trim) -> str:
+1444        trim_type = self.sql(expression, "position")
+1445
+1446        if trim_type == "LEADING":
+1447            return f"{self.normalize_func('LTRIM')}({self.format_args(expression.this)})"
+1448        elif trim_type == "TRAILING":
+1449            return f"{self.normalize_func('RTRIM')}({self.format_args(expression.this)})"
+1450        else:
+1451            return f"{self.normalize_func('TRIM')}({self.format_args(expression.this, expression.expression)})"
+1452
+1453    def concat_sql(self, expression: exp.Concat) -> str:
+1454        if len(expression.expressions) == 1:
+1455            return self.sql(expression.expressions[0])
+1456        return self.function_fallback_sql(expression)
+1457
+1458    def check_sql(self, expression: exp.Check) -> str:
+1459        this = self.sql(expression, key="this")
+1460        return f"CHECK ({this})"
+1461
+1462    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
+1463        expressions = self.expressions(expression, flat=True)
+1464        reference = self.sql(expression, "reference")
+1465        reference = f" {reference}" if reference else ""
+1466        delete = self.sql(expression, "delete")
+1467        delete = f" ON DELETE {delete}" if delete else ""
+1468        update = self.sql(expression, "update")
+1469        update = f" ON UPDATE {update}" if update else ""
+1470        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
+1471
+1472    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
+1473        expressions = self.expressions(expression, flat=True)
+1474        options = self.expressions(expression, "options", flat=True, sep=" ")
+1475        options = f" {options}" if options else ""
+1476        return f"PRIMARY KEY ({expressions}){options}"
+1477
+1478    def unique_sql(self, expression: exp.Unique) -> str:
+1479        columns = self.expressions(expression, key="expressions")
+1480        return f"UNIQUE ({columns})"
+1481
+1482    def if_sql(self, expression: exp.If) -> str:
+1483        return self.case_sql(
+1484            exp.Case(ifs=[expression.copy()], default=expression.args.get("false"))
+1485        )
+1486
+1487    def in_sql(self, expression: exp.In) -> str:
+1488        query = expression.args.get("query")
+1489        unnest = expression.args.get("unnest")
+1490        field = expression.args.get("field")
+1491        is_global = " GLOBAL" if expression.args.get("is_global") else ""
+1492
+1493        if query:
+1494            in_sql = self.wrap(query)
+1495        elif unnest:
+1496            in_sql = self.in_unnest_op(unnest)
+1497        elif field:
+1498            in_sql = self.sql(field)
+1499        else:
+1500            in_sql = f"({self.expressions(expression, flat=True)})"
+1501
+1502        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
+1503
+1504    def in_unnest_op(self, unnest: exp.Unnest) -> str:
+1505        return f"(SELECT {self.sql(unnest)})"
+1506
+1507    def interval_sql(self, expression: exp.Interval) -> str:
+1508        this = expression.args.get("this")
+1509        if this:
+1510            this = (
+1511                f" {this}"
+1512                if isinstance(this, exp.Literal) or isinstance(this, exp.Paren)
+1513                else f" ({this})"
+1514            )
+1515        else:
+1516            this = ""
+1517        unit = expression.args.get("unit")
+1518        unit = f" {unit}" if unit else ""
+1519        return f"INTERVAL{this}{unit}"
+1520
+1521    def return_sql(self, expression: exp.Return) -> str:
+1522        return f"RETURN {self.sql(expression, 'this')}"
+1523
+1524    def reference_sql(self, expression: exp.Reference) -> str:
+1525        this = self.sql(expression, "this")
+1526        expressions = self.expressions(expression, flat=True)
+1527        expressions = f"({expressions})" if expressions else ""
+1528        options = self.expressions(expression, "options", flat=True, sep=" ")
+1529        options = f" {options}" if options else ""
+1530        return f"REFERENCES {this}{expressions}{options}"
+1531
+1532    def anonymous_sql(self, expression: exp.Anonymous) -> str:
+1533        args = self.format_args(*expression.expressions)
+1534        return f"{self.normalize_func(self.sql(expression, 'this'))}({args})"
+1535
+1536    def paren_sql(self, expression: exp.Paren) -> str:
+1537        if isinstance(expression.unnest(), exp.Select):
+1538            sql = self.wrap(expression)
+1539        else:
+1540            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
+1541            sql = f"({sql}{self.seg(')', sep='')}"
+1542
+1543        return self.prepend_ctes(expression, sql)
+1544
+1545    def neg_sql(self, expression: exp.Neg) -> str:
+1546        # This makes sure we don't convert "- - 5" to "--5", which is a comment
+1547        this_sql = self.sql(expression, "this")
+1548        sep = " " if this_sql[0] == "-" else ""
+1549        return f"-{sep}{this_sql}"
+1550
+1551    def not_sql(self, expression: exp.Not) -> str:
+1552        return f"NOT {self.sql(expression, 'this')}"
+1553
+1554    def alias_sql(self, expression: exp.Alias) -> str:
+1555        to_sql = self.sql(expression, "alias")
+1556        to_sql = f" AS {to_sql}" if to_sql else ""
+1557        return f"{self.sql(expression, 'this')}{to_sql}"
+1558
+1559    def aliases_sql(self, expression: exp.Aliases) -> str:
+1560        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
+1561
+1562    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
+1563        this = self.sql(expression, "this")
+1564        zone = self.sql(expression, "zone")
+1565        return f"{this} AT TIME ZONE {zone}"
+1566
+1567    def add_sql(self, expression: exp.Add) -> str:
+1568        return self.binary(expression, "+")
+1569
+1570    def and_sql(self, expression: exp.And) -> str:
+1571        return self.connector_sql(expression, "AND")
+1572
+1573    def connector_sql(self, expression: exp.Connector, op: str) -> str:
+1574        if not self.pretty:
+1575            return self.binary(expression, op)
+1576
+1577        sqls = tuple(self.sql(e) for e in expression.flatten(unnest=False))
+1578        sep = "\n" if self.text_width(sqls) > self._max_text_width else " "
+1579        return f"{sep}{op} ".join(sqls)
+1580
+1581    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
+1582        return self.binary(expression, "&")
+1583
+1584    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
+1585        return self.binary(expression, "<<")
+1586
+1587    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
+1588        return f"~{self.sql(expression, 'this')}"
+1589
+1590    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
+1591        return self.binary(expression, "|")
+1592
+1593    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
+1594        return self.binary(expression, ">>")
+1595
+1596    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
+1597        return self.binary(expression, "^")
+1598
+1599    def cast_sql(self, expression: exp.Cast) -> str:
+1600        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+1601
+1602    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
+1603        zone = self.sql(expression, "this")
+1604        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
+1605
+1606    def collate_sql(self, expression: exp.Collate) -> str:
+1607        return self.binary(expression, "COLLATE")
+1608
+1609    def command_sql(self, expression: exp.Command) -> str:
+1610        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
+1611
+1612    def transaction_sql(self, *_) -> str:
+1613        return "BEGIN"
+1614
+1615    def commit_sql(self, expression: exp.Commit) -> str:
+1616        chain = expression.args.get("chain")
+1617        if chain is not None:
+1618            chain = " AND CHAIN" if chain else " AND NO CHAIN"
+1619
+1620        return f"COMMIT{chain or ''}"
+1621
+1622    def rollback_sql(self, expression: exp.Rollback) -> str:
+1623        savepoint = expression.args.get("savepoint")
+1624        savepoint = f" TO {savepoint}" if savepoint else ""
+1625        return f"ROLLBACK{savepoint}"
+1626
+1627    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
+1628        this = self.sql(expression, "this")
+1629
+1630        dtype = self.sql(expression, "dtype")
+1631        if dtype:
+1632            collate = self.sql(expression, "collate")
+1633            collate = f" COLLATE {collate}" if collate else ""
+1634            using = self.sql(expression, "using")
+1635            using = f" USING {using}" if using else ""
+1636            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
+1637
+1638        default = self.sql(expression, "default")
+1639        if default:
+1640            return f"ALTER COLUMN {this} SET DEFAULT {default}"
+1641
+1642        if not expression.args.get("drop"):
+1643            self.unsupported("Unsupported ALTER COLUMN syntax")
+1644
+1645        return f"ALTER COLUMN {this} DROP DEFAULT"
+1646
+1647    def renametable_sql(self, expression: exp.RenameTable) -> str:
+1648        this = self.sql(expression, "this")
+1649        return f"RENAME TO {this}"
+1650
+1651    def altertable_sql(self, expression: exp.AlterTable) -> str:
+1652        actions = expression.args["actions"]
+1653
+1654        if isinstance(actions[0], exp.ColumnDef):
+1655            actions = self.expressions(expression, "actions", prefix="ADD COLUMN ")
+1656        elif isinstance(actions[0], exp.Schema):
+1657            actions = self.expressions(expression, "actions", prefix="ADD COLUMNS ")
+1658        elif isinstance(actions[0], exp.Delete):
+1659            actions = self.expressions(expression, "actions", flat=True)
+1660        else:
+1661            actions = self.expressions(expression, "actions")
+1662
+1663        exists = " IF EXISTS" if expression.args.get("exists") else ""
+1664        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
+1665
+1666    def droppartition_sql(self, expression: exp.DropPartition) -> str:
+1667        expressions = self.expressions(expression)
+1668        exists = " IF EXISTS " if expression.args.get("exists") else " "
+1669        return f"DROP{exists}{expressions}"
+1670
+1671    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
+1672        this = self.sql(expression, "this")
+1673        expression_ = self.sql(expression, "expression")
+1674        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
+1675
+1676        enforced = expression.args.get("enforced")
+1677        if enforced is not None:
+1678            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
+1679
+1680        return f"{add_constraint} {expression_}"
+1681
+1682    def distinct_sql(self, expression: exp.Distinct) -> str:
+1683        this = self.expressions(expression, flat=True)
+1684        this = f" {this}" if this else ""
+1685
+1686        on = self.sql(expression, "on")
+1687        on = f" ON {on}" if on else ""
+1688        return f"DISTINCT{this}{on}"
+1689
+1690    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
+1691        return f"{self.sql(expression, 'this')} IGNORE NULLS"
+1692
+1693    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
+1694        return f"{self.sql(expression, 'this')} RESPECT NULLS"
+1695
+1696    def intdiv_sql(self, expression: exp.IntDiv) -> str:
+1697        return self.sql(
+1698            exp.Cast(
+1699                this=exp.Div(this=expression.this, expression=expression.expression),
+1700                to=exp.DataType(this=exp.DataType.Type.INT),
+1701            )
+1702        )
+1703
+1704    def dpipe_sql(self, expression: exp.DPipe) -> str:
+1705        return self.binary(expression, "||")
+1706
+1707    def div_sql(self, expression: exp.Div) -> str:
+1708        return self.binary(expression, "/")
+1709
+1710    def distance_sql(self, expression: exp.Distance) -> str:
+1711        return self.binary(expression, "<->")
+1712
+1713    def dot_sql(self, expression: exp.Dot) -> str:
+1714        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
+1715
+1716    def eq_sql(self, expression: exp.EQ) -> str:
+1717        return self.binary(expression, "=")
+1718
+1719    def escape_sql(self, expression: exp.Escape) -> str:
+1720        return self.binary(expression, "ESCAPE")
+1721
+1722    def glob_sql(self, expression: exp.Glob) -> str:
+1723        return self.binary(expression, "GLOB")
+1724
+1725    def gt_sql(self, expression: exp.GT) -> str:
+1726        return self.binary(expression, ">")
+1727
+1728    def gte_sql(self, expression: exp.GTE) -> str:
+1729        return self.binary(expression, ">=")
+1730
+1731    def ilike_sql(self, expression: exp.ILike) -> str:
+1732        return self.binary(expression, "ILIKE")
+1733
+1734    def is_sql(self, expression: exp.Is) -> str:
+1735        return self.binary(expression, "IS")
+1736
+1737    def like_sql(self, expression: exp.Like) -> str:
+1738        return self.binary(expression, "LIKE")
+1739
+1740    def similarto_sql(self, expression: exp.SimilarTo) -> str:
+1741        return self.binary(expression, "SIMILAR TO")
+1742
+1743    def lt_sql(self, expression: exp.LT) -> str:
+1744        return self.binary(expression, "<")
+1745
+1746    def lte_sql(self, expression: exp.LTE) -> str:
+1747        return self.binary(expression, "<=")
+1748
+1749    def mod_sql(self, expression: exp.Mod) -> str:
+1750        return self.binary(expression, "%")
+1751
+1752    def mul_sql(self, expression: exp.Mul) -> str:
+1753        return self.binary(expression, "*")
+1754
+1755    def neq_sql(self, expression: exp.NEQ) -> str:
+1756        return self.binary(expression, "<>")
+1757
+1758    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
+1759        return self.binary(expression, "IS NOT DISTINCT FROM")
+1760
+1761    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
+1762        return self.binary(expression, "IS DISTINCT FROM")
+1763
+1764    def or_sql(self, expression: exp.Or) -> str:
+1765        return self.connector_sql(expression, "OR")
+1766
+1767    def slice_sql(self, expression: exp.Slice) -> str:
+1768        return self.binary(expression, ":")
+1769
+1770    def sub_sql(self, expression: exp.Sub) -> str:
+1771        return self.binary(expression, "-")
+1772
+1773    def trycast_sql(self, expression: exp.TryCast) -> str:
+1774        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+1775
+1776    def use_sql(self, expression: exp.Use) -> str:
+1777        kind = self.sql(expression, "kind")
+1778        kind = f" {kind}" if kind else ""
+1779        this = self.sql(expression, "this")
+1780        this = f" {this}" if this else ""
+1781        return f"USE{kind}{this}"
+1782
+1783    def binary(self, expression: exp.Binary, op: str) -> str:
+1784        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
+1785
+1786    def function_fallback_sql(self, expression: exp.Func) -> str:
+1787        args = []
+1788        for arg_value in expression.args.values():
+1789            if isinstance(arg_value, list):
+1790                for value in arg_value:
+1791                    args.append(value)
+1792            else:
+1793                args.append(arg_value)
+1794
+1795        return f"{self.normalize_func(expression.sql_name())}({self.format_args(*args)})"
+1796
+1797    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
+1798        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
+1799        if self.pretty and self.text_width(arg_sqls) > self._max_text_width:
+1800            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
+1801        return ", ".join(arg_sqls)
+1802
+1803    def text_width(self, args: t.Iterable) -> int:
+1804        return sum(len(arg) for arg in args)
+1805
+1806    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
+1807        return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie)
+1808
+1809    def expressions(
+1810        self,
+1811        expression: exp.Expression,
+1812        key: t.Optional[str] = None,
+1813        flat: bool = False,
+1814        indent: bool = True,
+1815        sep: str = ", ",
+1816        prefix: str = "",
+1817    ) -> str:
+1818        expressions = expression.args.get(key or "expressions")
+1819
+1820        if not expressions:
+1821            return ""
+1822
+1823        if flat:
+1824            return sep.join(self.sql(e) for e in expressions)
+1825
+1826        num_sqls = len(expressions)
+1827
+1828        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
+1829        pad = " " * self.pad
+1830        stripped_sep = sep.strip()
+1831
+1832        result_sqls = []
+1833        for i, e in enumerate(expressions):
+1834            sql = self.sql(e, comment=False)
+1835            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
+1836
+1837            if self.pretty:
+1838                if self._leading_comma:
+1839                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
+1840                else:
+1841                    result_sqls.append(
+1842                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
+1843                    )
+1844            else:
+1845                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
+1846
+1847        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
+1848        return self.indent(result_sql, skip_first=False) if indent else result_sql
+1849
+1850    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
+1851        expressions_sql = self.expressions(expression, flat=flat)
+1852        if flat:
+1853            return f"{op} {expressions_sql}"
+1854        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
+1855
+1856    def naked_property(self, expression: exp.Property) -> str:
+1857        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
+1858        if not property_name:
+1859            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
+1860        return f"{property_name} {self.sql(expression, 'this')}"
+1861
+1862    def set_operation(self, expression: exp.Expression, op: str) -> str:
+1863        this = self.sql(expression, "this")
+1864        op = self.seg(op)
+1865        return self.query_modifiers(
+1866            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
+1867        )
+1868
+1869    def tag_sql(self, expression: exp.Tag) -> str:
+1870        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
+1871
+1872    def token_sql(self, token_type: TokenType) -> str:
+1873        return self.TOKEN_MAPPING.get(token_type, token_type.name)
+1874
+1875    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
+1876        this = self.sql(expression, "this")
+1877        expressions = self.no_identify(self.expressions, expression)
+1878        expressions = (
+1879            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
+1880        )
+1881        return f"{this}{expressions}"
+1882
+1883    def userdefinedfunctionkwarg_sql(self, expression: exp.UserDefinedFunctionKwarg) -> str:
+1884        this = self.sql(expression, "this")
+1885        kind = self.sql(expression, "kind")
+1886        return f"{this} {kind}"
+1887
+1888    def joinhint_sql(self, expression: exp.JoinHint) -> str:
+1889        this = self.sql(expression, "this")
+1890        expressions = self.expressions(expression, flat=True)
+1891        return f"{this}({expressions})"
+1892
+1893    def kwarg_sql(self, expression: exp.Kwarg) -> str:
+1894        return self.binary(expression, "=>")
+1895
+1896    def when_sql(self, expression: exp.When) -> str:
+1897        this = self.sql(expression, "this")
+1898        then_expression = expression.args.get("then")
+1899        if isinstance(then_expression, exp.Insert):
+1900            then = f"INSERT {self.sql(then_expression, 'this')}"
+1901            if "expression" in then_expression.args:
+1902                then += f" VALUES {self.sql(then_expression, 'expression')}"
+1903        elif isinstance(then_expression, exp.Update):
+1904            if isinstance(then_expression.args.get("expressions"), exp.Star):
+1905                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
+1906            else:
+1907                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
+1908        else:
+1909            then = self.sql(then_expression)
+1910        return f"WHEN {this} THEN {then}"
+1911
+1912    def merge_sql(self, expression: exp.Merge) -> str:
+1913        this = self.sql(expression, "this")
+1914        using = f"USING {self.sql(expression, 'using')}"
+1915        on = f"ON {self.sql(expression, 'on')}"
+1916        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
+
+ + +
+
+ +
+ + class + Generator: + + + +
+ +
  19class Generator:
+  20    """
+  21    Generator interprets the given syntax tree and produces a SQL string as an output.
+  22
+  23    Args:
+  24        time_mapping (dict): the dictionary of custom time mappings in which the key
+  25            represents a python time format and the output the target time format
+  26        time_trie (trie): a trie of the time_mapping keys
+  27        pretty (bool): if set to True the returned string will be formatted. Default: False.
+  28        quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
+  29        quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
+  30        identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
+  31        identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
+  32        identify (bool): if set to True all identifiers will be delimited by the corresponding
+  33            character.
+  34        normalize (bool): if set to True all identifiers will lower cased
+  35        string_escape (str): specifies a string escape character. Default: '.
+  36        identifier_escape (str): specifies an identifier escape character. Default: ".
+  37        pad (int): determines padding in a formatted string. Default: 2.
+  38        indent (int): determines the size of indentation in a formatted string. Default: 4.
+  39        unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
+  40        normalize_functions (str): normalize function names, "upper", "lower", or None
+  41            Default: "upper"
+  42        alias_post_tablesample (bool): if the table alias comes after tablesample
+  43            Default: False
+  44        unsupported_level (ErrorLevel): determines the generator's behavior when it encounters
+  45            unsupported expressions. Default ErrorLevel.WARN.
+  46        null_ordering (str): Indicates the default null ordering method to use if not explicitly set.
+  47            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
+  48            Default: "nulls_are_small"
+  49        max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError.
+  50            This is only relevant if unsupported_level is ErrorLevel.RAISE.
+  51            Default: 3
+  52        leading_comma (bool): if the the comma is leading or trailing in select statements
+  53            Default: False
+  54        max_text_width: The max number of characters in a segment before creating new lines in pretty mode.
+  55            The default is on the smaller end because the length only represents a segment and not the true
+  56            line length.
+  57            Default: 80
+  58        comments: Whether or not to preserve comments in the output SQL code.
+  59            Default: True
+  60    """
+  61
+  62    TRANSFORMS = {
+  63        exp.DateAdd: lambda self, e: f"DATE_ADD({self.format_args(e.this, e.expression, e.args.get('unit'))})",
+  64        exp.DateDiff: lambda self, e: f"DATEDIFF({self.format_args(e.this, e.expression)})",
+  65        exp.TsOrDsAdd: lambda self, e: f"TS_OR_DS_ADD({self.format_args(e.this, e.expression, e.args.get('unit'))})",
+  66        exp.VarMap: lambda self, e: f"MAP({self.format_args(e.args['keys'], e.args['values'])})",
+  67        exp.CharacterSetProperty: lambda self, e: f"{'DEFAULT ' if e.args['default'] else ''}CHARACTER SET={self.sql(e, 'this')}",
+  68        exp.LanguageProperty: lambda self, e: self.naked_property(e),
+  69        exp.LocationProperty: lambda self, e: self.naked_property(e),
+  70        exp.ReturnsProperty: lambda self, e: self.naked_property(e),
+  71        exp.ExecuteAsProperty: lambda self, e: self.naked_property(e),
+  72        exp.VolatilityProperty: lambda self, e: e.name,
+  73        exp.WithJournalTableProperty: lambda self, e: f"WITH JOURNAL TABLE={self.sql(e, 'this')}",
+  74        exp.LogProperty: lambda self, e: f"{'NO ' if e.args.get('no') else ''}LOG",
+  75        exp.SqlSecurityProperty: lambda self, e: f"SQL SECURITY {'DEFINER' if e.args.get('definer') else 'INVOKER'}",
+  76    }
+  77
+  78    # Whether 'CREATE ... TRANSIENT ... TABLE' is allowed
+  79    CREATE_TRANSIENT = False
+  80
+  81    # Whether or not null ordering is supported in order by
+  82    NULL_ORDERING_SUPPORTED = True
+  83
+  84    # Whether or not locking reads (i.e. SELECT ... FOR UPDATE/SHARE) are supported
+  85    LOCKING_READS_SUPPORTED = False
+  86
+  87    # Always do union distinct or union all
+  88    EXPLICIT_UNION = False
+  89
+  90    # Wrap derived values in parens, usually standard but spark doesn't support it
+  91    WRAP_DERIVED_VALUES = True
+  92
+  93    # Whether or not create function uses an AS before the def.
+  94    CREATE_FUNCTION_AS = True
+  95
+  96    TYPE_MAPPING = {
+  97        exp.DataType.Type.NCHAR: "CHAR",
+  98        exp.DataType.Type.NVARCHAR: "VARCHAR",
+  99        exp.DataType.Type.MEDIUMTEXT: "TEXT",
+ 100        exp.DataType.Type.LONGTEXT: "TEXT",
+ 101        exp.DataType.Type.MEDIUMBLOB: "BLOB",
+ 102        exp.DataType.Type.LONGBLOB: "BLOB",
+ 103    }
+ 104
+ 105    STAR_MAPPING = {
+ 106        "except": "EXCEPT",
+ 107        "replace": "REPLACE",
+ 108    }
+ 109
+ 110    TOKEN_MAPPING: t.Dict[TokenType, str] = {}
+ 111
+ 112    STRUCT_DELIMITER = ("<", ">")
+ 113
+ 114    PROPERTIES_LOCATION = {
+ 115        exp.AfterJournalProperty: exp.Properties.Location.PRE_SCHEMA,
+ 116        exp.AlgorithmProperty: exp.Properties.Location.POST_CREATE,
+ 117        exp.AutoIncrementProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 118        exp.BlockCompressionProperty: exp.Properties.Location.PRE_SCHEMA,
+ 119        exp.CharacterSetProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 120        exp.ChecksumProperty: exp.Properties.Location.PRE_SCHEMA,
+ 121        exp.CollateProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 122        exp.DataBlocksizeProperty: exp.Properties.Location.PRE_SCHEMA,
+ 123        exp.DefinerProperty: exp.Properties.Location.POST_CREATE,
+ 124        exp.DistKeyProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 125        exp.DistStyleProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 126        exp.EngineProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 127        exp.ExecuteAsProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 128        exp.FallbackProperty: exp.Properties.Location.PRE_SCHEMA,
+ 129        exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 130        exp.FreespaceProperty: exp.Properties.Location.PRE_SCHEMA,
+ 131        exp.IsolatedLoadingProperty: exp.Properties.Location.PRE_SCHEMA,
+ 132        exp.JournalProperty: exp.Properties.Location.PRE_SCHEMA,
+ 133        exp.LanguageProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 134        exp.LikeProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 135        exp.LocationProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 136        exp.LogProperty: exp.Properties.Location.PRE_SCHEMA,
+ 137        exp.MergeBlockRatioProperty: exp.Properties.Location.PRE_SCHEMA,
+ 138        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 139        exp.Property: exp.Properties.Location.POST_SCHEMA_WITH,
+ 140        exp.ReturnsProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 141        exp.RowFormatDelimitedProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 142        exp.RowFormatSerdeProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 143        exp.SchemaCommentProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 144        exp.SerdeProperties: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 145        exp.SortKeyProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 146        exp.SqlSecurityProperty: exp.Properties.Location.POST_CREATE,
+ 147        exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA_WITH,
+ 148        exp.VolatilityProperty: exp.Properties.Location.POST_SCHEMA_ROOT,
+ 149        exp.WithJournalTableProperty: exp.Properties.Location.PRE_SCHEMA,
+ 150    }
+ 151
+ 152    WITH_SEPARATED_COMMENTS = (exp.Select, exp.From, exp.Where, exp.Binary)
+ 153    SENTINEL_LINE_BREAK = "__SQLGLOT__LB__"
+ 154
+ 155    __slots__ = (
+ 156        "time_mapping",
+ 157        "time_trie",
+ 158        "pretty",
+ 159        "quote_start",
+ 160        "quote_end",
+ 161        "identifier_start",
+ 162        "identifier_end",
+ 163        "identify",
+ 164        "normalize",
+ 165        "string_escape",
+ 166        "identifier_escape",
+ 167        "pad",
+ 168        "index_offset",
+ 169        "unnest_column_only",
+ 170        "alias_post_tablesample",
+ 171        "normalize_functions",
+ 172        "unsupported_level",
+ 173        "unsupported_messages",
+ 174        "null_ordering",
+ 175        "max_unsupported",
+ 176        "_indent",
+ 177        "_replace_backslash",
+ 178        "_escaped_quote_end",
+ 179        "_escaped_identifier_end",
+ 180        "_leading_comma",
+ 181        "_max_text_width",
+ 182        "_comments",
+ 183    )
+ 184
+ 185    def __init__(
+ 186        self,
+ 187        time_mapping=None,
+ 188        time_trie=None,
+ 189        pretty=None,
+ 190        quote_start=None,
+ 191        quote_end=None,
+ 192        identifier_start=None,
+ 193        identifier_end=None,
+ 194        identify=False,
+ 195        normalize=False,
+ 196        string_escape=None,
+ 197        identifier_escape=None,
+ 198        pad=2,
+ 199        indent=2,
+ 200        index_offset=0,
+ 201        unnest_column_only=False,
+ 202        alias_post_tablesample=False,
+ 203        normalize_functions="upper",
+ 204        unsupported_level=ErrorLevel.WARN,
+ 205        null_ordering=None,
+ 206        max_unsupported=3,
+ 207        leading_comma=False,
+ 208        max_text_width=80,
+ 209        comments=True,
+ 210    ):
+ 211        import sqlglot
+ 212
+ 213        self.time_mapping = time_mapping or {}
+ 214        self.time_trie = time_trie
+ 215        self.pretty = pretty if pretty is not None else sqlglot.pretty
+ 216        self.quote_start = quote_start or "'"
+ 217        self.quote_end = quote_end or "'"
+ 218        self.identifier_start = identifier_start or '"'
+ 219        self.identifier_end = identifier_end or '"'
+ 220        self.identify = identify
+ 221        self.normalize = normalize
+ 222        self.string_escape = string_escape or "'"
+ 223        self.identifier_escape = identifier_escape or '"'
+ 224        self.pad = pad
+ 225        self.index_offset = index_offset
+ 226        self.unnest_column_only = unnest_column_only
+ 227        self.alias_post_tablesample = alias_post_tablesample
+ 228        self.normalize_functions = normalize_functions
+ 229        self.unsupported_level = unsupported_level
+ 230        self.unsupported_messages = []
+ 231        self.max_unsupported = max_unsupported
+ 232        self.null_ordering = null_ordering
+ 233        self._indent = indent
+ 234        self._replace_backslash = self.string_escape == "\\"
+ 235        self._escaped_quote_end = self.string_escape + self.quote_end
+ 236        self._escaped_identifier_end = self.identifier_escape + self.identifier_end
+ 237        self._leading_comma = leading_comma
+ 238        self._max_text_width = max_text_width
+ 239        self._comments = comments
+ 240
+ 241    def generate(self, expression: t.Optional[exp.Expression]) -> str:
+ 242        """
+ 243        Generates a SQL string by interpreting the given syntax tree.
+ 244
+ 245        Args
+ 246            expression: the syntax tree.
+ 247
+ 248        Returns
+ 249            the SQL string.
+ 250        """
+ 251        self.unsupported_messages = []
+ 252        sql = self.sql(expression).strip()
+ 253
+ 254        if self.unsupported_level == ErrorLevel.IGNORE:
+ 255            return sql
+ 256
+ 257        if self.unsupported_level == ErrorLevel.WARN:
+ 258            for msg in self.unsupported_messages:
+ 259                logger.warning(msg)
+ 260        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
+ 261            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
+ 262
+ 263        if self.pretty:
+ 264            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
+ 265        return sql
+ 266
+ 267    def unsupported(self, message: str) -> None:
+ 268        if self.unsupported_level == ErrorLevel.IMMEDIATE:
+ 269            raise UnsupportedError(message)
+ 270        self.unsupported_messages.append(message)
+ 271
+ 272    def sep(self, sep: str = " ") -> str:
+ 273        return f"{sep.strip()}\n" if self.pretty else sep
+ 274
+ 275    def seg(self, sql: str, sep: str = " ") -> str:
+ 276        return f"{self.sep(sep)}{sql}"
+ 277
+ 278    def pad_comment(self, comment: str) -> str:
+ 279        comment = " " + comment if comment[0].strip() else comment
+ 280        comment = comment + " " if comment[-1].strip() else comment
+ 281        return comment
+ 282
+ 283    def maybe_comment(self, sql: str, expression: exp.Expression) -> str:
+ 284        comments = expression.comments if self._comments else None
+ 285
+ 286        if not comments:
+ 287            return sql
+ 288
+ 289        sep = "\n" if self.pretty else " "
+ 290        comments_sql = sep.join(
+ 291            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
+ 292        )
+ 293
+ 294        if not comments_sql:
+ 295            return sql
+ 296
+ 297        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
+ 298            return f"{comments_sql}{self.sep()}{sql}"
+ 299
+ 300        return f"{sql} {comments_sql}"
+ 301
+ 302    def wrap(self, expression: exp.Expression | str) -> str:
+ 303        this_sql = self.indent(
+ 304            self.sql(expression)
+ 305            if isinstance(expression, (exp.Select, exp.Union))
+ 306            else self.sql(expression, "this"),
+ 307            level=1,
+ 308            pad=0,
+ 309        )
+ 310        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
+ 311
+ 312    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
+ 313        original = self.identify
+ 314        self.identify = False
+ 315        result = func(*args, **kwargs)
+ 316        self.identify = original
+ 317        return result
+ 318
+ 319    def normalize_func(self, name: str) -> str:
+ 320        if self.normalize_functions == "upper":
+ 321            return name.upper()
+ 322        if self.normalize_functions == "lower":
+ 323            return name.lower()
+ 324        return name
+ 325
+ 326    def indent(
+ 327        self,
+ 328        sql: str,
+ 329        level: int = 0,
+ 330        pad: t.Optional[int] = None,
+ 331        skip_first: bool = False,
+ 332        skip_last: bool = False,
+ 333    ) -> str:
+ 334        if not self.pretty:
+ 335            return sql
+ 336
+ 337        pad = self.pad if pad is None else pad
+ 338        lines = sql.split("\n")
+ 339
+ 340        return "\n".join(
+ 341            line
+ 342            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
+ 343            else f"{' ' * (level * self._indent + pad)}{line}"
+ 344            for i, line in enumerate(lines)
+ 345        )
+ 346
    def sql(
        self,
        expression: t.Optional[str | exp.Expression],
        key: t.Optional[str] = None,
        comment: bool = True,
    ) -> str:
        """Render *expression* to a SQL string.

        Dispatch order: plain strings pass through unchanged; with *key*,
        the expression's arg under that key is rendered instead; otherwise
        the TRANSFORMS table is consulted, then a ``<key>_sql`` handler
        method, then the generic Func/Property fallbacks.

        Raises:
            ValueError: for an unknown expression type or a non-Expression input.
        """
        if not expression:
            return ""

        if isinstance(expression, str):
            return expression

        # key-based lookup recurses without the key so normal dispatch applies
        if key:
            return self.sql(expression.args.get(key))

        transform = self.TRANSFORMS.get(expression.__class__)

        if callable(transform):
            sql = transform(self, expression)
        elif transform:
            # non-callable transform entries are literal SQL snippets
            sql = transform
        elif isinstance(expression, exp.Expression):
            exp_handler_name = f"{expression.key}_sql"

            if hasattr(self, exp_handler_name):
                sql = getattr(self, exp_handler_name)(expression)
            elif isinstance(expression, exp.Func):
                sql = self.function_fallback_sql(expression)
            elif isinstance(expression, exp.Property):
                sql = self.property_sql(expression)
            else:
                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
        else:
            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")

        # attach comments unless the caller (or the generator config) opted out
        return self.maybe_comment(sql, expression) if self._comments and comment else sql
+ 383
+ 384    def uncache_sql(self, expression: exp.Uncache) -> str:
+ 385        table = self.sql(expression, "this")
+ 386        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
+ 387        return f"UNCACHE TABLE{exists_sql} {table}"
+ 388
+ 389    def cache_sql(self, expression: exp.Cache) -> str:
+ 390        lazy = " LAZY" if expression.args.get("lazy") else ""
+ 391        table = self.sql(expression, "this")
+ 392        options = expression.args.get("options")
+ 393        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
+ 394        sql = self.sql(expression, "expression")
+ 395        sql = f" AS{self.sep()}{sql}" if sql else ""
+ 396        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
+ 397        return self.prepend_ctes(expression, sql)
+ 398
+ 399    def characterset_sql(self, expression: exp.CharacterSet) -> str:
+ 400        if isinstance(expression.parent, exp.Cast):
+ 401            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
+ 402        default = "DEFAULT " if expression.args.get("default") else ""
+ 403        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
+ 404
+ 405    def column_sql(self, expression: exp.Column) -> str:
+ 406        return ".".join(
+ 407            part
+ 408            for part in [
+ 409                self.sql(expression, "db"),
+ 410                self.sql(expression, "table"),
+ 411                self.sql(expression, "this"),
+ 412            ]
+ 413            if part
+ 414        )
+ 415
+ 416    def columndef_sql(self, expression: exp.ColumnDef) -> str:
+ 417        column = self.sql(expression, "this")
+ 418        kind = self.sql(expression, "kind")
+ 419        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
+ 420        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
+ 421        kind = f" {kind}" if kind else ""
+ 422        constraints = f" {constraints}" if constraints else ""
+ 423
+ 424        return f"{exists}{column}{kind}{constraints}"
+ 425
+ 426    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
+ 427        this = self.sql(expression, "this")
+ 428        kind_sql = self.sql(expression, "kind")
+ 429        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
+ 430
    def autoincrementcolumnconstraint_sql(self, _) -> str:
        """Render the dialect's AUTO_INCREMENT keyword (expression is unused)."""
        return self.token_sql(TokenType.AUTO_INCREMENT)
+ 433
+ 434    def checkcolumnconstraint_sql(self, expression: exp.CheckColumnConstraint) -> str:
+ 435        this = self.sql(expression, "this")
+ 436        return f"CHECK ({this})"
+ 437
+ 438    def commentcolumnconstraint_sql(self, expression: exp.CommentColumnConstraint) -> str:
+ 439        comment = self.sql(expression, "this")
+ 440        return f"COMMENT {comment}"
+ 441
+ 442    def collatecolumnconstraint_sql(self, expression: exp.CollateColumnConstraint) -> str:
+ 443        collate = self.sql(expression, "this")
+ 444        return f"COLLATE {collate}"
+ 445
+ 446    def encodecolumnconstraint_sql(self, expression: exp.EncodeColumnConstraint) -> str:
+ 447        encode = self.sql(expression, "this")
+ 448        return f"ENCODE {encode}"
+ 449
+ 450    def defaultcolumnconstraint_sql(self, expression: exp.DefaultColumnConstraint) -> str:
+ 451        default = self.sql(expression, "this")
+ 452        return f"DEFAULT {default}"
+ 453
+ 454    def generatedasidentitycolumnconstraint_sql(
+ 455        self, expression: exp.GeneratedAsIdentityColumnConstraint
+ 456    ) -> str:
+ 457        this = ""
+ 458        if expression.this is not None:
+ 459            this = " ALWAYS " if expression.this else " BY DEFAULT "
+ 460        start = expression.args.get("start")
+ 461        start = f"START WITH {start}" if start else ""
+ 462        increment = expression.args.get("increment")
+ 463        increment = f"INCREMENT BY {increment}" if increment else ""
+ 464        sequence_opts = ""
+ 465        if start or increment:
+ 466            sequence_opts = f"{start} {increment}"
+ 467            sequence_opts = f" ({sequence_opts.strip()})"
+ 468        return f"GENERATED{this}AS IDENTITY{sequence_opts}"
+ 469
+ 470    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
+ 471        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
+ 472
+ 473    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
+ 474        desc = expression.args.get("desc")
+ 475        if desc is not None:
+ 476            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
+ 477        return f"PRIMARY KEY"
+ 478
    def uniquecolumnconstraint_sql(self, _) -> str:
        """Render a UNIQUE column constraint (expression is unused)."""
        return "UNIQUE"
+ 481
    def create_sql(self, expression: exp.Create) -> str:
        """Render a CREATE statement (table, view, function, index, ...).

        Properties are first bucketed by location via ``locate_properties``;
        the buckets drive where each property is emitted: before the schema,
        after the schema (root/WITH), attached to a primary index, or right
        after CREATE. Any WITH clause on the expression is prepended at the end.
        """
        kind = self.sql(expression, "kind").upper()
        properties = expression.args.get("properties")
        # work on a copy so the original expression's properties are untouched
        properties_exp = expression.copy()
        properties_locs = self.locate_properties(properties) if properties else {}
        # keep only post-schema properties on the copy; other buckets are
        # rendered explicitly below
        if properties_locs.get(exp.Properties.Location.POST_SCHEMA_ROOT) or properties_locs.get(
            exp.Properties.Location.POST_SCHEMA_WITH
        ):
            properties_exp.set(
                "properties",
                exp.Properties(
                    expressions=[
                        *properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT],
                        *properties_locs[exp.Properties.Location.POST_SCHEMA_WITH],
                    ]
                ),
            )
        if kind == "TABLE" and properties_locs.get(exp.Properties.Location.PRE_SCHEMA):
            # pre-schema properties go between the table name and its schema
            this_name = self.sql(expression.this, "this")
            this_properties = self.properties(
                exp.Properties(expressions=properties_locs[exp.Properties.Location.PRE_SCHEMA]),
                wrapped=False,
            )
            this_schema = f"({self.expressions(expression.this)})"
            this = f"{this_name}, {this_properties} {this_schema}"
            properties_sql = ""
        else:
            this = self.sql(expression, "this")
            properties_sql = self.sql(properties_exp, "properties")
        begin = " BEGIN" if expression.args.get("begin") else ""
        expression_sql = self.sql(expression, "expression")
        if expression_sql:
            expression_sql = f"{begin}{self.sep()}{expression_sql}"

            # some dialects define functions without an AS keyword
            if self.CREATE_FUNCTION_AS or kind != "FUNCTION":
                expression_sql = f" AS{expression_sql}"

        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
        transient = (
            " TRANSIENT" if self.CREATE_TRANSIENT and expression.args.get("transient") else ""
        )
        external = " EXTERNAL" if expression.args.get("external") else ""
        replace = " OR REPLACE" if expression.args.get("replace") else ""
        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
        unique = " UNIQUE" if expression.args.get("unique") else ""
        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
        set_ = " SET" if expression.args.get("set") else ""
        multiset = " MULTISET" if expression.args.get("multiset") else ""
        global_temporary = " GLOBAL TEMPORARY" if expression.args.get("global_temporary") else ""
        volatile = " VOLATILE" if expression.args.get("volatile") else ""
        # tri-state: None -> omitted, True -> WITH DATA, False -> WITH NO DATA
        data = expression.args.get("data")
        if data is None:
            data = ""
        elif data:
            data = " WITH DATA"
        else:
            data = " WITH NO DATA"
        # tri-state like `data` above
        statistics = expression.args.get("statistics")
        if statistics is None:
            statistics = ""
        elif statistics:
            statistics = " AND STATISTICS"
        else:
            statistics = " AND NO STATISTICS"
        no_primary_index = " NO PRIMARY INDEX" if expression.args.get("no_primary_index") else ""

        # inline index clauses (e.g. Teradata's [UNIQUE] [PRIMARY] [AMP] INDEX)
        indexes = expression.args.get("indexes")
        index_sql = ""
        if indexes:
            indexes_sql = []
            for index in indexes:
                ind_unique = " UNIQUE" if index.args.get("unique") else ""
                ind_primary = " PRIMARY" if index.args.get("primary") else ""
                ind_amp = " AMP" if index.args.get("amp") else ""
                ind_name = f" {index.name}" if index.name else ""
                ind_columns = (
                    f' ({self.expressions(index, key="columns", flat=True)})'
                    if index.args.get("columns")
                    else ""
                )
                # POST_INDEX properties attach to the primary index's column list
                if index.args.get("primary") and properties_locs.get(
                    exp.Properties.Location.POST_INDEX
                ):
                    postindex_props_sql = self.properties(
                        exp.Properties(
                            expressions=properties_locs[exp.Properties.Location.POST_INDEX]
                        ),
                        wrapped=False,
                    )
                    ind_columns = f"{ind_columns} {postindex_props_sql}"

                indexes_sql.append(
                    f"{ind_unique}{ind_primary}{ind_amp} INDEX{ind_name}{ind_columns}"
                )
            index_sql = "".join(indexes_sql)

        postcreate_props_sql = ""
        if properties_locs.get(exp.Properties.Location.POST_CREATE):
            postcreate_props_sql = self.properties(
                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
                sep=" ",
                prefix=" ",
                wrapped=False,
            )

        modifiers = "".join(
            (
                replace,
                temporary,
                transient,
                external,
                unique,
                materialized,
                set_,
                multiset,
                global_temporary,
                volatile,
                postcreate_props_sql,
            )
        )
        no_schema_binding = (
            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
        )

        post_expression_modifiers = "".join((data, statistics, no_primary_index))

        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{post_expression_modifiers}{index_sql}{no_schema_binding}"
        return self.prepend_ctes(expression, expression_sql)
+ 610
+ 611    def describe_sql(self, expression: exp.Describe) -> str:
+ 612        return f"DESCRIBE {self.sql(expression, 'this')}"
+ 613
+ 614    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
+ 615        with_ = self.sql(expression, "with")
+ 616        if with_:
+ 617            sql = f"{with_}{self.sep()}{sql}"
+ 618        return sql
+ 619
+ 620    def with_sql(self, expression: exp.With) -> str:
+ 621        sql = self.expressions(expression, flat=True)
+ 622        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
+ 623
+ 624        return f"WITH {recursive}{sql}"
+ 625
+ 626    def cte_sql(self, expression: exp.CTE) -> str:
+ 627        alias = self.sql(expression, "alias")
+ 628        return f"{alias} AS {self.wrap(expression)}"
+ 629
+ 630    def tablealias_sql(self, expression: exp.TableAlias) -> str:
+ 631        alias = self.sql(expression, "this")
+ 632        columns = self.expressions(expression, key="columns", flat=True)
+ 633        columns = f"({columns})" if columns else ""
+ 634        return f"{alias}{columns}"
+ 635
    def bitstring_sql(self, expression: exp.BitString) -> str:
        """Render a bit-string literal as-is (dialects override for other syntaxes)."""
        return self.sql(expression, "this")
+ 638
    def hexstring_sql(self, expression: exp.HexString) -> str:
        """Render a hex-string literal as-is (dialects override for other syntaxes)."""
        return self.sql(expression, "this")
+ 641
+ 642    def datatype_sql(self, expression: exp.DataType) -> str:
+ 643        type_value = expression.this
+ 644        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
+ 645        nested = ""
+ 646        interior = self.expressions(expression, flat=True)
+ 647        values = ""
+ 648        if interior:
+ 649            if expression.args.get("nested"):
+ 650                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
+ 651                if expression.args.get("values") is not None:
+ 652                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
+ 653                    values = (
+ 654                        f"{delimiters[0]}{self.expressions(expression, 'values')}{delimiters[1]}"
+ 655                    )
+ 656            else:
+ 657                nested = f"({interior})"
+ 658
+ 659        return f"{type_sql}{nested}{values}"
+ 660
+ 661    def directory_sql(self, expression: exp.Directory) -> str:
+ 662        local = "LOCAL " if expression.args.get("local") else ""
+ 663        row_format = self.sql(expression, "row_format")
+ 664        row_format = f" {row_format}" if row_format else ""
+ 665        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
+ 666
+ 667    def delete_sql(self, expression: exp.Delete) -> str:
+ 668        this = self.sql(expression, "this")
+ 669        this = f" FROM {this}" if this else ""
+ 670        using_sql = (
+ 671            f" USING {self.expressions(expression, 'using', sep=', USING ')}"
+ 672            if expression.args.get("using")
+ 673            else ""
+ 674        )
+ 675        where_sql = self.sql(expression, "where")
+ 676        sql = f"DELETE{this}{using_sql}{where_sql}"
+ 677        return self.prepend_ctes(expression, sql)
+ 678
+ 679    def drop_sql(self, expression: exp.Drop) -> str:
+ 680        this = self.sql(expression, "this")
+ 681        kind = expression.args["kind"]
+ 682        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
+ 683        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+ 684        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
+ 685        cascade = " CASCADE" if expression.args.get("cascade") else ""
+ 686        return f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}"
+ 687
+ 688    def except_sql(self, expression: exp.Except) -> str:
+ 689        return self.prepend_ctes(
+ 690            expression,
+ 691            self.set_operation(expression, self.except_op(expression)),
+ 692        )
+ 693
+ 694    def except_op(self, expression: exp.Except) -> str:
+ 695        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
+ 696
+ 697    def fetch_sql(self, expression: exp.Fetch) -> str:
+ 698        direction = expression.args.get("direction")
+ 699        direction = f" {direction.upper()}" if direction else ""
+ 700        count = expression.args.get("count")
+ 701        count = f" {count}" if count else ""
+ 702        return f"{self.seg('FETCH')}{direction}{count} ROWS ONLY"
+ 703
+ 704    def filter_sql(self, expression: exp.Filter) -> str:
+ 705        this = self.sql(expression, "this")
+ 706        where = self.sql(expression, "expression")[1:]  # where has a leading space
+ 707        return f"{this} FILTER({where})"
+ 708
+ 709    def hint_sql(self, expression: exp.Hint) -> str:
+ 710        if self.sql(expression, "this"):
+ 711            self.unsupported("Hints are not supported")
+ 712        return ""
+ 713
+ 714    def index_sql(self, expression: exp.Index) -> str:
+ 715        this = self.sql(expression, "this")
+ 716        table = self.sql(expression, "table")
+ 717        columns = self.sql(expression, "columns")
+ 718        return f"{this} ON {table} {columns}"
+ 719
+ 720    def identifier_sql(self, expression: exp.Identifier) -> str:
+ 721        text = expression.name
+ 722        text = text.lower() if self.normalize else text
+ 723        text = text.replace(self.identifier_end, self._escaped_identifier_end)
+ 724        if expression.args.get("quoted") or self.identify:
+ 725            text = f"{self.identifier_start}{text}{self.identifier_end}"
+ 726        return text
+ 727
+ 728    def national_sql(self, expression: exp.National) -> str:
+ 729        return f"N{self.sql(expression, 'this')}"
+ 730
+ 731    def partition_sql(self, expression: exp.Partition) -> str:
+ 732        return f"PARTITION({self.expressions(expression)})"
+ 733
+ 734    def properties_sql(self, expression: exp.Properties) -> str:
+ 735        root_properties = []
+ 736        with_properties = []
+ 737
+ 738        for p in expression.expressions:
+ 739            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+ 740            if p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+ 741                with_properties.append(p)
+ 742            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+ 743                root_properties.append(p)
+ 744
+ 745        return self.root_properties(
+ 746            exp.Properties(expressions=root_properties)
+ 747        ) + self.with_properties(exp.Properties(expressions=with_properties))
+ 748
+ 749    def root_properties(self, properties: exp.Properties) -> str:
+ 750        if properties.expressions:
+ 751            return self.sep() + self.expressions(properties, indent=False, sep=" ")
+ 752        return ""
+ 753
+ 754    def properties(
+ 755        self,
+ 756        properties: exp.Properties,
+ 757        prefix: str = "",
+ 758        sep: str = ", ",
+ 759        suffix: str = "",
+ 760        wrapped: bool = True,
+ 761    ) -> str:
+ 762        if properties.expressions:
+ 763            expressions = self.expressions(properties, sep=sep, indent=False)
+ 764            expressions = self.wrap(expressions) if wrapped else expressions
+ 765            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
+ 766        return ""
+ 767
+ 768    def with_properties(self, properties: exp.Properties) -> str:
+ 769        return self.properties(properties, prefix=self.seg("WITH"))
+ 770
+ 771    def locate_properties(
+ 772        self, properties: exp.Properties
+ 773    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
+ 774        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
+ 775            key: [] for key in exp.Properties.Location
+ 776        }
+ 777
+ 778        for p in properties.expressions:
+ 779            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+ 780            if p_loc == exp.Properties.Location.PRE_SCHEMA:
+ 781                properties_locs[exp.Properties.Location.PRE_SCHEMA].append(p)
+ 782            elif p_loc == exp.Properties.Location.POST_INDEX:
+ 783                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
+ 784            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+ 785                properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT].append(p)
+ 786            elif p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+ 787                properties_locs[exp.Properties.Location.POST_SCHEMA_WITH].append(p)
+ 788            elif p_loc == exp.Properties.Location.POST_CREATE:
+ 789                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
+ 790            elif p_loc == exp.Properties.Location.UNSUPPORTED:
+ 791                self.unsupported(f"Unsupported property {p.key}")
+ 792
+ 793        return properties_locs
+ 794
+ 795    def property_sql(self, expression: exp.Property) -> str:
+ 796        property_cls = expression.__class__
+ 797        if property_cls == exp.Property:
+ 798            return f"{expression.name}={self.sql(expression, 'value')}"
+ 799
+ 800        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
+ 801        if not property_name:
+ 802            self.unsupported(f"Unsupported property {expression.key}")
+ 803
+ 804        return f"{property_name}={self.sql(expression, 'this')}"
+ 805
+ 806    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
+ 807        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
+ 808        options = f" {options}" if options else ""
+ 809        return f"LIKE {self.sql(expression, 'this')}{options}"
+ 810
+ 811    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
+ 812        no = "NO " if expression.args.get("no") else ""
+ 813        protection = " PROTECTION" if expression.args.get("protection") else ""
+ 814        return f"{no}FALLBACK{protection}"
+ 815
+ 816    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
+ 817        no = "NO " if expression.args.get("no") else ""
+ 818        dual = "DUAL " if expression.args.get("dual") else ""
+ 819        before = "BEFORE " if expression.args.get("before") else ""
+ 820        return f"{no}{dual}{before}JOURNAL"
+ 821
+ 822    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
+ 823        freespace = self.sql(expression, "this")
+ 824        percent = " PERCENT" if expression.args.get("percent") else ""
+ 825        return f"FREESPACE={freespace}{percent}"
+ 826
+ 827    def afterjournalproperty_sql(self, expression: exp.AfterJournalProperty) -> str:
+ 828        no = "NO " if expression.args.get("no") else ""
+ 829        dual = "DUAL " if expression.args.get("dual") else ""
+ 830        local = ""
+ 831        if expression.args.get("local") is not None:
+ 832            local = "LOCAL " if expression.args.get("local") else "NOT LOCAL "
+ 833        return f"{no}{dual}{local}AFTER JOURNAL"
+ 834
+ 835    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
+ 836        if expression.args.get("default"):
+ 837            property = "DEFAULT"
+ 838        elif expression.args.get("on"):
+ 839            property = "ON"
+ 840        else:
+ 841            property = "OFF"
+ 842        return f"CHECKSUM={property}"
+ 843
+ 844    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
+ 845        if expression.args.get("no"):
+ 846            return "NO MERGEBLOCKRATIO"
+ 847        if expression.args.get("default"):
+ 848            return "DEFAULT MERGEBLOCKRATIO"
+ 849
+ 850        percent = " PERCENT" if expression.args.get("percent") else ""
+ 851        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
+ 852
+ 853    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
+ 854        default = expression.args.get("default")
+ 855        min = expression.args.get("min")
+ 856        if default is not None or min is not None:
+ 857            if default:
+ 858                property = "DEFAULT"
+ 859            elif min:
+ 860                property = "MINIMUM"
+ 861            else:
+ 862                property = "MAXIMUM"
+ 863            return f"{property} DATABLOCKSIZE"
+ 864        else:
+ 865            units = expression.args.get("units")
+ 866            units = f" {units}" if units else ""
+ 867            return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
+ 868
    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
        """Render BLOCKCOMPRESSION=AUTOTEMP(...)|ALWAYS|DEFAULT|MANUAL|NEVER.

        NOTE(review): if none of the flags is set, ``property`` is never bound
        and this raises ``UnboundLocalError`` — presumably the parser always
        sets exactly one; confirm before relying on it.
        """
        autotemp = expression.args.get("autotemp")
        always = expression.args.get("always")
        default = expression.args.get("default")
        manual = expression.args.get("manual")
        never = expression.args.get("never")

        if autotemp is not None:
            property = f"AUTOTEMP({self.expressions(autotemp)})"
        elif always:
            property = "ALWAYS"
        elif default:
            property = "DEFAULT"
        elif manual:
            property = "MANUAL"
        elif never:
            property = "NEVER"
        return f"BLOCKCOMPRESSION={property}"
+ 887
+ 888    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
+ 889        no = expression.args.get("no")
+ 890        no = " NO" if no else ""
+ 891        concurrent = expression.args.get("concurrent")
+ 892        concurrent = " CONCURRENT" if concurrent else ""
+ 893
+ 894        for_ = ""
+ 895        if expression.args.get("for_all"):
+ 896            for_ = " FOR ALL"
+ 897        elif expression.args.get("for_insert"):
+ 898            for_ = " FOR INSERT"
+ 899        elif expression.args.get("for_none"):
+ 900            for_ = " FOR NONE"
+ 901        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
+ 902
+ 903    def insert_sql(self, expression: exp.Insert) -> str:
+ 904        overwrite = expression.args.get("overwrite")
+ 905
+ 906        if isinstance(expression.this, exp.Directory):
+ 907            this = "OVERWRITE " if overwrite else "INTO "
+ 908        else:
+ 909            this = "OVERWRITE TABLE " if overwrite else "INTO "
+ 910
+ 911        this = f"{this}{self.sql(expression, 'this')}"
+ 912        exists = " IF EXISTS " if expression.args.get("exists") else " "
+ 913        partition_sql = (
+ 914            self.sql(expression, "partition") if expression.args.get("partition") else ""
+ 915        )
+ 916        expression_sql = self.sql(expression, "expression")
+ 917        sep = self.sep() if partition_sql else ""
+ 918        sql = f"INSERT {this}{exists}{partition_sql}{sep}{expression_sql}"
+ 919        return self.prepend_ctes(expression, sql)
+ 920
+ 921    def intersect_sql(self, expression: exp.Intersect) -> str:
+ 922        return self.prepend_ctes(
+ 923            expression,
+ 924            self.set_operation(expression, self.intersect_op(expression)),
+ 925        )
+ 926
+ 927    def intersect_op(self, expression: exp.Intersect) -> str:
+ 928        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
+ 929
+ 930    def introducer_sql(self, expression: exp.Introducer) -> str:
+ 931        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+ 932
    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
        """Render a pseudo-type name in upper case."""
        return expression.name.upper()
+ 935
+ 936    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
+ 937        fields = expression.args.get("fields")
+ 938        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
+ 939        escaped = expression.args.get("escaped")
+ 940        escaped = f" ESCAPED BY {escaped}" if escaped else ""
+ 941        items = expression.args.get("collection_items")
+ 942        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
+ 943        keys = expression.args.get("map_keys")
+ 944        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
+ 945        lines = expression.args.get("lines")
+ 946        lines = f" LINES TERMINATED BY {lines}" if lines else ""
+ 947        null = expression.args.get("null")
+ 948        null = f" NULL DEFINED AS {null}" if null else ""
+ 949        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
+ 950
+ 951    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
+ 952        table = ".".join(
+ 953            part
+ 954            for part in [
+ 955                self.sql(expression, "catalog"),
+ 956                self.sql(expression, "db"),
+ 957                self.sql(expression, "this"),
+ 958            ]
+ 959            if part
+ 960        )
+ 961
+ 962        alias = self.sql(expression, "alias")
+ 963        alias = f"{sep}{alias}" if alias else ""
+ 964        hints = self.expressions(expression, key="hints", sep=", ", flat=True)
+ 965        hints = f" WITH ({hints})" if hints else ""
+ 966        laterals = self.expressions(expression, key="laterals", sep="")
+ 967        joins = self.expressions(expression, key="joins", sep="")
+ 968        pivots = self.expressions(expression, key="pivots", sep="")
+ 969        system_time = expression.args.get("system_time")
+ 970        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
+ 971
+ 972        if alias and pivots:
+ 973            pivots = f"{pivots}{alias}"
+ 974            alias = ""
+ 975
+ 976        return f"{table}{system_time}{alias}{hints}{laterals}{joins}{pivots}"
+ 977
+ 978    def tablesample_sql(self, expression: exp.TableSample) -> str:
+ 979        if self.alias_post_tablesample and expression.this.alias:
+ 980            this = self.sql(expression.this, "this")
+ 981            alias = f" AS {self.sql(expression.this, 'alias')}"
+ 982        else:
+ 983            this = self.sql(expression, "this")
+ 984            alias = ""
+ 985        method = self.sql(expression, "method")
+ 986        method = f" {method.upper()} " if method else ""
+ 987        numerator = self.sql(expression, "bucket_numerator")
+ 988        denominator = self.sql(expression, "bucket_denominator")
+ 989        field = self.sql(expression, "bucket_field")
+ 990        field = f" ON {field}" if field else ""
+ 991        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
+ 992        percent = self.sql(expression, "percent")
+ 993        percent = f"{percent} PERCENT" if percent else ""
+ 994        rows = self.sql(expression, "rows")
+ 995        rows = f"{rows} ROWS" if rows else ""
+ 996        size = self.sql(expression, "size")
+ 997        seed = self.sql(expression, "seed")
+ 998        seed = f" SEED ({seed})" if seed else ""
+ 999        return f"{this} TABLESAMPLE{method}({bucket}{percent}{rows}{size}){seed}{alias}"
+1000
+1001    def pivot_sql(self, expression: exp.Pivot) -> str:
+1002        this = self.sql(expression, "this")
+1003        unpivot = expression.args.get("unpivot")
+1004        direction = "UNPIVOT" if unpivot else "PIVOT"
+1005        expressions = self.expressions(expression, key="expressions")
+1006        field = self.sql(expression, "field")
+1007        return f"{this} {direction}({expressions} FOR {field})"
+1008
    def tuple_sql(self, expression: exp.Tuple) -> str:
        """Render a parenthesized tuple of expressions."""
        return f"({self.expressions(expression, flat=True)})"
+1011
+1012    def update_sql(self, expression: exp.Update) -> str:
+1013        this = self.sql(expression, "this")
+1014        set_sql = self.expressions(expression, flat=True)
+1015        from_sql = self.sql(expression, "from")
+1016        where_sql = self.sql(expression, "where")
+1017        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}"
+1018        return self.prepend_ctes(expression, sql)
+1019
+1020    def values_sql(self, expression: exp.Values) -> str:
+1021        args = self.expressions(expression)
+1022        alias = self.sql(expression, "alias")
+1023        values = f"VALUES{self.seg('')}{args}"
+1024        values = (
+1025            f"({values})"
+1026            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
+1027            else values
+1028        )
+1029        return f"{values} AS {alias}" if alias else values
+1030
    def var_sql(self, expression: exp.Var) -> str:
        """Render a variable/keyword token as its raw text."""
        return self.sql(expression, "this")
+1033
    def into_sql(self, expression: exp.Into) -> str:
        """Render an INTO clause with an optional TEMPORARY or UNLOGGED modifier."""
        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
+1038
    def from_sql(self, expression: exp.From) -> str:
        """Render a FROM clause with its comma-separated sources."""
        expressions = self.expressions(expression, flat=True)
        return f"{self.seg('FROM')} {expressions}"
+1042
    def group_sql(self, expression: exp.Group) -> str:
        """Render GROUP BY plus GROUPING SETS, CUBE and ROLLUP variants.

        ``cube``/``rollup`` may be literally ``True`` (bare ``WITH CUBE`` /
        ``WITH ROLLUP``) or a list of expressions (``CUBE (...)`` / ``ROLLUP (...)``).
        """
        group_by = self.op_expressions("GROUP BY", expression)
        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
        grouping_sets = (
            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
        )

        cube = expression.args.get("cube")
        if cube is True:
            cube = self.seg("WITH CUBE")
        else:
            cube = self.expressions(expression, key="cube", indent=False)
            cube = f"{self.seg('CUBE')} {self.wrap(cube)}" if cube else ""

        rollup = expression.args.get("rollup")
        if rollup is True:
            rollup = self.seg("WITH ROLLUP")
        else:
            rollup = self.expressions(expression, key="rollup", indent=False)
            rollup = f"{self.seg('ROLLUP')} {self.wrap(rollup)}" if rollup else ""

        return f"{group_by}{csv(grouping_sets, cube, rollup, sep=',')}"
+1065
    def having_sql(self, expression: exp.Having) -> str:
        """Render a HAVING clause with its condition indented."""
        this = self.indent(self.sql(expression, "this"))
        return f"{self.seg('HAVING')}{self.sep()}{this}"
+1069
    def join_sql(self, expression: exp.Join) -> str:
        """Render a join: ``[NATURAL] [side] [kind] JOIN <this>`` with ON or USING."""
        op_sql = self.seg(
            " ".join(
                op
                for op in (
                    "NATURAL" if expression.args.get("natural") else None,
                    expression.side,
                    expression.kind,
                    "JOIN",
                )
                if op
            )
        )
        on_sql = self.sql(expression, "on")
        using = expression.args.get("using")

        # USING columns are rendered into the same slot as an ON condition.
        if not on_sql and using:
            on_sql = csv(*(self.sql(column) for column in using))

        if on_sql:
            on_sql = self.indent(on_sql, skip_first=True)
            space = self.seg(" " * self.pad) if self.pretty else " "
            if using:
                on_sql = f"{space}USING ({on_sql})"
            else:
                on_sql = f"{space}ON {on_sql}"

        expression_sql = self.sql(expression, "expression")
        this_sql = self.sql(expression, "this")
        return f"{expression_sql}{op_sql} {this_sql}{on_sql}"
+1100
    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
        """Render a lambda; the argument list is parenthesized when there is more than one."""
        args = self.expressions(expression, flat=True)
        # NOTE(review): counting args by splitting on "," assumes a single rendered
        # argument never contains a comma — confirm for complex argument forms.
        args = f"({args})" if len(args.split(",")) > 1 else args
        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
+1105
    def lateral_sql(self, expression: exp.Lateral) -> str:
        """Render LATERAL: subquery form, Hive-style LATERAL VIEW, or aliased form."""
        this = self.sql(expression, "this")

        if isinstance(expression.this, exp.Subquery):
            return f"LATERAL {this}"

        if expression.args.get("view"):
            alias = expression.args["alias"]
            columns = self.expressions(alias, key="columns", flat=True)
            table = f" {alias.name}" if alias.name else ""
            columns = f" AS {columns}" if columns else ""
            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
            return f"{op_sql}{self.sep()}{this}{table}{columns}"

        alias = self.sql(expression, "alias")
        alias = f" AS {alias}" if alias else ""
        return f"LATERAL {this}{alias}"
+1123
    def limit_sql(self, expression: exp.Limit) -> str:
        """Render a LIMIT clause after the expression it qualifies."""
        this = self.sql(expression, "this")
        return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}"
+1127
    def offset_sql(self, expression: exp.Offset) -> str:
        """Render an OFFSET clause after the expression it qualifies."""
        this = self.sql(expression, "this")
        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
+1131
+1132    def lock_sql(self, expression: exp.Lock) -> str:
+1133        if self.LOCKING_READS_SUPPORTED:
+1134            lock_type = "UPDATE" if expression.args["update"] else "SHARE"
+1135            return self.seg(f"FOR {lock_type}")
+1136
+1137        self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
+1138        return ""
+1139
    def literal_sql(self, expression: exp.Literal) -> str:
        """Render a literal; strings are quoted and escaped, other values pass through."""
        text = expression.this or ""
        if expression.is_string:
            if self._replace_backslash:
                text = BACKSLASH_RE.sub(r"\\\\", text)
            text = text.replace(self.quote_end, self._escaped_quote_end)
            if self.pretty:
                # Protect embedded newlines from pretty-printer re-indentation.
                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
            text = f"{self.quote_start}{text}{self.quote_end}"
        return text
+1150
+1151    def loaddata_sql(self, expression: exp.LoadData) -> str:
+1152        local = " LOCAL" if expression.args.get("local") else ""
+1153        inpath = f" INPATH {self.sql(expression, 'inpath')}"
+1154        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
+1155        this = f" INTO TABLE {self.sql(expression, 'this')}"
+1156        partition = self.sql(expression, "partition")
+1157        partition = f" {partition}" if partition else ""
+1158        input_format = self.sql(expression, "input_format")
+1159        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
+1160        serde = self.sql(expression, "serde")
+1161        serde = f" SERDE {serde}" if serde else ""
+1162        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
+1163
+1164    def null_sql(self, *_) -> str:
+1165        return "NULL"
+1166
    def boolean_sql(self, expression: exp.Boolean) -> str:
        """Render a boolean literal as TRUE or FALSE."""
        return "TRUE" if expression.this else "FALSE"
+1169
    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
        """Render an ORDER BY clause; flattened when it qualifies a preceding term."""
        this = self.sql(expression, "this")
        this = f"{this} " if this else this
        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
+1174
    def cluster_sql(self, expression: exp.Cluster) -> str:
        """Render a CLUSTER BY clause."""
        return self.op_expressions("CLUSTER BY", expression)
+1177
    def distribute_sql(self, expression: exp.Distribute) -> str:
        """Render a DISTRIBUTE BY clause."""
        return self.op_expressions("DISTRIBUTE BY", expression)
+1180
    def sort_sql(self, expression: exp.Sort) -> str:
        """Render a SORT BY clause."""
        return self.op_expressions("SORT BY", expression)
+1183
    def ordered_sql(self, expression: exp.Ordered) -> str:
        """Render one ORDER BY key, emitting NULLS FIRST/LAST only when the target
        dialect's default null ordering would otherwise place nulls differently."""
        desc = expression.args.get("desc")
        asc = not desc

        nulls_first = expression.args.get("nulls_first")
        nulls_last = not nulls_first
        nulls_are_large = self.null_ordering == "nulls_are_large"
        nulls_are_small = self.null_ordering == "nulls_are_small"
        nulls_are_last = self.null_ordering == "nulls_are_last"

        sort_order = " DESC" if desc else ""
        nulls_sort_change = ""
        if nulls_first and (
            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
        ):
            nulls_sort_change = " NULLS FIRST"
        elif (
            nulls_last
            and ((asc and nulls_are_small) or (desc and nulls_are_large))
            and not nulls_are_last
        ):
            nulls_sort_change = " NULLS LAST"

        # Drop the modifier (with a warning) when the dialect can't express it.
        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
            self.unsupported(
                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
            )
            nulls_sort_change = ""

        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
+1214
+1215    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
+1216        partition = self.partition_by_sql(expression)
+1217        order = self.sql(expression, "order")
+1218        measures = self.sql(expression, "measures")
+1219        measures = self.seg(f"MEASURES {measures}") if measures else ""
+1220        rows = self.sql(expression, "rows")
+1221        rows = self.seg(rows) if rows else ""
+1222        after = self.sql(expression, "after")
+1223        after = self.seg(after) if after else ""
+1224        pattern = self.sql(expression, "pattern")
+1225        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
+1226        define = self.sql(expression, "define")
+1227        define = self.seg(f"DEFINE {define}") if define else ""
+1228        body = "".join(
+1229            (
+1230                partition,
+1231                order,
+1232                measures,
+1233                rows,
+1234                after,
+1235                pattern,
+1236                define,
+1237            )
+1238        )
+1239        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}"
+1240
    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
        """Append a query's trailing clauses (joins through LIMIT/OFFSET/locks)
        to the already-rendered *sqls fragments, in canonical clause order."""
        return csv(
            *sqls,
            *[self.sql(sql) for sql in expression.args.get("joins") or []],
            self.sql(expression, "match"),
            *[self.sql(sql) for sql in expression.args.get("laterals") or []],
            self.sql(expression, "where"),
            self.sql(expression, "group"),
            self.sql(expression, "having"),
            self.sql(expression, "qualify"),
            self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
            if expression.args.get("windows")
            else "",
            self.sql(expression, "distribute"),
            self.sql(expression, "sort"),
            self.sql(expression, "cluster"),
            self.sql(expression, "order"),
            self.sql(expression, "limit"),
            self.sql(expression, "offset"),
            self.sql(expression, "lock"),
            sep="",
        )
+1263
    def select_sql(self, expression: exp.Select) -> str:
        """Render a SELECT statement including its modifiers and leading CTEs."""
        hint = self.sql(expression, "hint")
        distinct = self.sql(expression, "distinct")
        distinct = f" {distinct}" if distinct else ""
        expressions = self.expressions(expression)
        expressions = f"{self.sep()}{expressions}" if expressions else expressions
        sql = self.query_modifiers(
            expression,
            f"SELECT{hint}{distinct}{expressions}",
            self.sql(expression, "into", comment=False),
            self.sql(expression, "from", comment=False),
        )
        return self.prepend_ctes(expression, sql)
+1277
    def schema_sql(self, expression: exp.Schema) -> str:
        """Render a schema: optional name followed by a parenthesized column list."""
        this = self.sql(expression, "this")
        this = f"{this} " if this else ""
        sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
        return f"{this}{sql}"
+1283
    def star_sql(self, expression: exp.Star) -> str:
        """Render ``*`` with dialect-mapped EXCEPT (...) and REPLACE (...) modifiers."""
        except_ = self.expressions(expression, key="except", flat=True)
        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
        replace = self.expressions(expression, key="replace", flat=True)
        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
        return f"*{except_}{replace}"
+1290
    def structkwarg_sql(self, expression: exp.StructKwarg) -> str:
        """Render a struct field as "<name> <value/type>"."""
        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+1293
    def parameter_sql(self, expression: exp.Parameter) -> str:
        """Render a parameter reference as @name."""
        return f"@{self.sql(expression, 'this')}"
+1296
+1297    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
+1298        this = self.sql(expression, "this")
+1299        kind = expression.text("kind")
+1300        if kind:
+1301            kind = f"{kind}."
+1302        return f"@@{kind}{this}"
+1303
    def placeholder_sql(self, expression: exp.Placeholder) -> str:
        """Render a placeholder: named (:name) or positional (?)."""
        return f":{expression.name}" if expression.name else "?"
+1306
    def subquery_sql(self, expression: exp.Subquery) -> str:
        """Render a parenthesized subquery with pivots, alias, and trailing modifiers."""
        alias = self.sql(expression, "alias")

        sql = self.query_modifiers(
            expression,
            self.wrap(expression),
            self.expressions(expression, key="pivots", sep=" "),
            f" AS {alias}" if alias else "",
        )

        return self.prepend_ctes(expression, sql)
+1318
    def qualify_sql(self, expression: exp.Qualify) -> str:
        """Render a QUALIFY clause with its condition indented."""
        this = self.indent(self.sql(expression, "this"))
        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
+1322
    def union_sql(self, expression: exp.Union) -> str:
        """Render a UNION set operation, prepending any leading CTEs."""
        return self.prepend_ctes(
            expression,
            self.set_operation(expression, self.union_op(expression)),
        )
+1328
+1329    def union_op(self, expression: exp.Union) -> str:
+1330        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
+1331        kind = kind if expression.args.get("distinct") else " ALL"
+1332        return f"UNION{kind}"
+1333
    def unnest_sql(self, expression: exp.Unnest) -> str:
        """Render UNNEST(...) with ordinality, alias, and WITH OFFSET support."""
        args = self.expressions(expression, flat=True)
        alias = expression.args.get("alias")
        if alias and self.unnest_column_only:
            # Some dialects alias the produced column rather than the table.
            columns = alias.columns
            alias = self.sql(columns[0]) if columns else ""
        else:
            alias = self.sql(expression, "alias")
        alias = f" AS {alias}" if alias else alias
        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
        offset = expression.args.get("offset")
        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
        return f"UNNEST({args}){ordinality}{alias}{offset}"
+1347
    def where_sql(self, expression: exp.Where) -> str:
        """Render a WHERE clause with its condition indented."""
        this = self.indent(self.sql(expression, "this"))
        return f"{self.seg('WHERE')}{self.sep()}{this}"
+1351
    def window_sql(self, expression: exp.Window) -> str:
        """Render a window: OVER (...) on a call, or a named WINDOW definition."""
        this = self.sql(expression, "this")

        partition = self.partition_by_sql(expression)

        order = expression.args.get("order")
        order_sql = self.order_sql(order, flat=True) if order else ""

        partition_sql = partition + " " if partition and order else partition

        spec = expression.args.get("spec")
        spec_sql = " " + self.window_spec_sql(spec) if spec else ""

        alias = self.sql(expression, "alias")
        # When stored under the "windows" arg key, this is a named WINDOW
        # definition (rendered with AS); otherwise it is an OVER clause.
        this = f"{this} {'AS' if expression.arg_key == 'windows' else 'OVER'}"

        if not partition and not order and not spec and alias:
            return f"{this} {alias}"

        window_args = alias + partition_sql + order_sql + spec_sql

        return f"{this} ({window_args.strip()})"
+1374
    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
        """Render a PARTITION BY clause, or an empty string when absent."""
        partition = self.expressions(expression, key="partition_by", flat=True)
        return f"PARTITION BY {partition}" if partition else ""
+1378
+1379    def window_spec_sql(self, expression: exp.WindowSpec) -> str:
+1380        kind = self.sql(expression, "kind")
+1381        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
+1382        end = (
+1383            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
+1384            or "CURRENT ROW"
+1385        )
+1386        return f"{kind} BETWEEN {start} AND {end}"
+1387
    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
        """Render a WITHIN GROUP (ORDER BY ...) clause."""
        this = self.sql(expression, "this")
        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
        return f"{this} WITHIN GROUP ({expression_sql})"
+1392
+1393    def between_sql(self, expression: exp.Between) -> str:
+1394        this = self.sql(expression, "this")
+1395        low = self.sql(expression, "low")
+1396        high = self.sql(expression, "high")
+1397        return f"{this} BETWEEN {low} AND {high}"
+1398
    def bracket_sql(self, expression: exp.Bracket) -> str:
        """Render subscript access, shifting indexes by the dialect's index offset."""
        expressions = apply_index_offset(expression.expressions, self.index_offset)
        expressions_sql = ", ".join(self.sql(e) for e in expressions)

        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
+1404
    def all_sql(self, expression: exp.All) -> str:
        """Render an ALL quantifier over a wrapped subquery."""
        return f"ALL {self.wrap(expression)}"
+1407
    def any_sql(self, expression: exp.Any) -> str:
        """Render an ANY quantifier over a wrapped subquery."""
        return f"ANY {self.wrap(expression)}"
+1410
    def exists_sql(self, expression: exp.Exists) -> str:
        """Render an EXISTS predicate over a wrapped subquery."""
        return f"EXISTS{self.wrap(expression)}"
+1413
+1414    def case_sql(self, expression: exp.Case) -> str:
+1415        this = self.sql(expression, "this")
+1416        statements = [f"CASE {this}" if this else "CASE"]
+1417
+1418        for e in expression.args["ifs"]:
+1419            statements.append(f"WHEN {self.sql(e, 'this')}")
+1420            statements.append(f"THEN {self.sql(e, 'true')}")
+1421
+1422        default = self.sql(expression, "default")
+1423
+1424        if default:
+1425            statements.append(f"ELSE {default}")
+1426
+1427        statements.append("END")
+1428
+1429        if self.pretty and self.text_width(statements) > self._max_text_width:
+1430            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
+1431
+1432        return " ".join(statements)
+1433
    def constraint_sql(self, expression: exp.Constraint) -> str:
        """Render a named CONSTRAINT with its definition expressions."""
        this = self.sql(expression, "this")
        expressions = self.expressions(expression, flat=True)
        return f"CONSTRAINT {this} {expressions}"
+1438
    def extract_sql(self, expression: exp.Extract) -> str:
        """Render EXTRACT(<unit> FROM <value>)."""
        this = self.sql(expression, "this")
        expression_sql = self.sql(expression, "expression")
        return f"EXTRACT({this} FROM {expression_sql})"
+1443
+1444    def trim_sql(self, expression: exp.Trim) -> str:
+1445        trim_type = self.sql(expression, "position")
+1446
+1447        if trim_type == "LEADING":
+1448            return f"{self.normalize_func('LTRIM')}({self.format_args(expression.this)})"
+1449        elif trim_type == "TRAILING":
+1450            return f"{self.normalize_func('RTRIM')}({self.format_args(expression.this)})"
+1451        else:
+1452            return f"{self.normalize_func('TRIM')}({self.format_args(expression.this, expression.expression)})"
+1453
    def concat_sql(self, expression: exp.Concat) -> str:
        """Render CONCAT; a single operand collapses to the operand itself."""
        if len(expression.expressions) == 1:
            return self.sql(expression.expressions[0])
        return self.function_fallback_sql(expression)
+1458
    def check_sql(self, expression: exp.Check) -> str:
        """Render a CHECK (...) constraint."""
        this = self.sql(expression, key="this")
        return f"CHECK ({this})"
+1462
+1463    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
+1464        expressions = self.expressions(expression, flat=True)
+1465        reference = self.sql(expression, "reference")
+1466        reference = f" {reference}" if reference else ""
+1467        delete = self.sql(expression, "delete")
+1468        delete = f" ON DELETE {delete}" if delete else ""
+1469        update = self.sql(expression, "update")
+1470        update = f" ON UPDATE {update}" if update else ""
+1471        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
+1472
+1473    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
+1474        expressions = self.expressions(expression, flat=True)
+1475        options = self.expressions(expression, "options", flat=True, sep=" ")
+1476        options = f" {options}" if options else ""
+1477        return f"PRIMARY KEY ({expressions}){options}"
+1478
    def unique_sql(self, expression: exp.Unique) -> str:
        """Render a UNIQUE (...) constraint."""
        columns = self.expressions(expression, key="expressions")
        return f"UNIQUE ({columns})"
+1482
    def if_sql(self, expression: exp.If) -> str:
        """Render IF as an equivalent single-branch CASE expression."""
        return self.case_sql(
            exp.Case(ifs=[expression.copy()], default=expression.args.get("false"))
        )
+1487
+1488    def in_sql(self, expression: exp.In) -> str:
+1489        query = expression.args.get("query")
+1490        unnest = expression.args.get("unnest")
+1491        field = expression.args.get("field")
+1492        is_global = " GLOBAL" if expression.args.get("is_global") else ""
+1493
+1494        if query:
+1495            in_sql = self.wrap(query)
+1496        elif unnest:
+1497            in_sql = self.in_unnest_op(unnest)
+1498        elif field:
+1499            in_sql = self.sql(field)
+1500        else:
+1501            in_sql = f"({self.expressions(expression, flat=True)})"
+1502
+1503        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
+1504
    def in_unnest_op(self, unnest: exp.Unnest) -> str:
        """Render the right-hand side of IN over an UNNEST as a scalar subquery."""
        return f"(SELECT {self.sql(unnest)})"
+1507
+1508    def interval_sql(self, expression: exp.Interval) -> str:
+1509        this = expression.args.get("this")
+1510        if this:
+1511            this = (
+1512                f" {this}"
+1513                if isinstance(this, exp.Literal) or isinstance(this, exp.Paren)
+1514                else f" ({this})"
+1515            )
+1516        else:
+1517            this = ""
+1518        unit = expression.args.get("unit")
+1519        unit = f" {unit}" if unit else ""
+1520        return f"INTERVAL{this}{unit}"
+1521
    def return_sql(self, expression: exp.Return) -> str:
        """Render a RETURN statement."""
        return f"RETURN {self.sql(expression, 'this')}"
+1524
+1525    def reference_sql(self, expression: exp.Reference) -> str:
+1526        this = self.sql(expression, "this")
+1527        expressions = self.expressions(expression, flat=True)
+1528        expressions = f"({expressions})" if expressions else ""
+1529        options = self.expressions(expression, "options", flat=True, sep=" ")
+1530        options = f" {options}" if options else ""
+1531        return f"REFERENCES {this}{expressions}{options}"
+1532
    def anonymous_sql(self, expression: exp.Anonymous) -> str:
        """Render an unrecognized function call, normalizing its name."""
        args = self.format_args(*expression.expressions)
        return f"{self.normalize_func(self.sql(expression, 'this'))}({args})"
+1536
    def paren_sql(self, expression: exp.Paren) -> str:
        """Render parentheses; SELECT bodies get the wrap/indent treatment."""
        if isinstance(expression.unnest(), exp.Select):
            sql = self.wrap(expression)
        else:
            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
            sql = f"({sql}{self.seg(')', sep='')}"

        return self.prepend_ctes(expression, sql)
+1545
    def neg_sql(self, expression: exp.Neg) -> str:
        """Render unary minus, spacing nested negations apart."""
        # This makes sure we don't convert "- - 5" to "--5", which is a comment
        this_sql = self.sql(expression, "this")
        sep = " " if this_sql[0] == "-" else ""
        return f"-{sep}{this_sql}"
+1551
    def not_sql(self, expression: exp.Not) -> str:
        """Render a NOT operator."""
        return f"NOT {self.sql(expression, 'this')}"
+1554
+1555    def alias_sql(self, expression: exp.Alias) -> str:
+1556        to_sql = self.sql(expression, "alias")
+1557        to_sql = f" AS {to_sql}" if to_sql else ""
+1558        return f"{self.sql(expression, 'this')}{to_sql}"
+1559
    def aliases_sql(self, expression: exp.Aliases) -> str:
        """Render a multi-column alias: "<this> AS (a, b, ...)"."""
        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
+1562
    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        """Render an AT TIME ZONE expression."""
        this = self.sql(expression, "this")
        zone = self.sql(expression, "zone")
        return f"{this} AT TIME ZONE {zone}"
+1567
    def add_sql(self, expression: exp.Add) -> str:
        """Render binary addition."""
        return self.binary(expression, "+")
+1570
    def and_sql(self, expression: exp.And) -> str:
        """Render an AND connector chain."""
        return self.connector_sql(expression, "AND")
+1573
    def connector_sql(self, expression: exp.Connector, op: str) -> str:
        """Render an AND/OR chain, breaking onto new lines when the flattened
        chain is too wide in pretty mode."""
        if not self.pretty:
            return self.binary(expression, op)

        sqls = tuple(self.sql(e) for e in expression.flatten(unnest=False))
        sep = "\n" if self.text_width(sqls) > self._max_text_width else " "
        return f"{sep}{op} ".join(sqls)
+1581
    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
        """Render bitwise AND."""
        return self.binary(expression, "&")
+1584
    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
        """Render bitwise left shift."""
        return self.binary(expression, "<<")
+1587
    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
        """Render bitwise NOT (unary ~)."""
        return f"~{self.sql(expression, 'this')}"
+1590
    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
        """Render bitwise OR."""
        return self.binary(expression, "|")
+1593
    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
        """Render bitwise right shift."""
        return self.binary(expression, ">>")
+1596
    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
        """Render bitwise XOR."""
        return self.binary(expression, "^")
+1599
    def cast_sql(self, expression: exp.Cast) -> str:
        """Render CAST(<value> AS <type>)."""
        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+1602
    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
        """Render CURRENT_DATE, with parentheses only when a zone argument exists."""
        zone = self.sql(expression, "this")
        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
+1606
    def collate_sql(self, expression: exp.Collate) -> str:
        """Render a COLLATE binary expression."""
        return self.binary(expression, "COLLATE")
+1609
    def command_sql(self, expression: exp.Command) -> str:
        """Render an opaque command: uppercased keyword plus its raw remainder."""
        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
+1612
+1613    def transaction_sql(self, *_) -> str:
+1614        return "BEGIN"
+1615
+1616    def commit_sql(self, expression: exp.Commit) -> str:
+1617        chain = expression.args.get("chain")
+1618        if chain is not None:
+1619            chain = " AND CHAIN" if chain else " AND NO CHAIN"
+1620
+1621        return f"COMMIT{chain or ''}"
+1622
    def rollback_sql(self, expression: exp.Rollback) -> str:
        """Render ROLLBACK with an optional TO <savepoint> target."""
        savepoint = expression.args.get("savepoint")
        savepoint = f" TO {savepoint}" if savepoint else ""
        return f"ROLLBACK{savepoint}"
+1627
    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
        """Render ALTER COLUMN: a type change, SET DEFAULT, or DROP DEFAULT."""
        this = self.sql(expression, "this")

        dtype = self.sql(expression, "dtype")
        if dtype:
            collate = self.sql(expression, "collate")
            collate = f" COLLATE {collate}" if collate else ""
            using = self.sql(expression, "using")
            using = f" USING {using}" if using else ""
            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"

        default = self.sql(expression, "default")
        if default:
            return f"ALTER COLUMN {this} SET DEFAULT {default}"

        # Anything other than an explicit drop at this point is unexpected;
        # warn, but still fall through to the DROP DEFAULT form.
        if not expression.args.get("drop"):
            self.unsupported("Unsupported ALTER COLUMN syntax")

        return f"ALTER COLUMN {this} DROP DEFAULT"
+1647
    def renametable_sql(self, expression: exp.RenameTable) -> str:
        """Render a RENAME TO action."""
        this = self.sql(expression, "this")
        return f"RENAME TO {this}"
+1651
    def altertable_sql(self, expression: exp.AlterTable) -> str:
        """Render ALTER TABLE; the action prefix depends on the first action's type."""
        actions = expression.args["actions"]

        if isinstance(actions[0], exp.ColumnDef):
            actions = self.expressions(expression, "actions", prefix="ADD COLUMN ")
        elif isinstance(actions[0], exp.Schema):
            actions = self.expressions(expression, "actions", prefix="ADD COLUMNS ")
        elif isinstance(actions[0], exp.Delete):
            actions = self.expressions(expression, "actions", flat=True)
        else:
            actions = self.expressions(expression, "actions")

        exists = " IF EXISTS" if expression.args.get("exists") else ""
        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
+1666
    def droppartition_sql(self, expression: exp.DropPartition) -> str:
        """Render a DROP [IF EXISTS] partition action."""
        expressions = self.expressions(expression)
        exists = " IF EXISTS " if expression.args.get("exists") else " "
        return f"DROP{exists}{expressions}"
+1671
+1672    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
+1673        this = self.sql(expression, "this")
+1674        expression_ = self.sql(expression, "expression")
+1675        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
+1676
+1677        enforced = expression.args.get("enforced")
+1678        if enforced is not None:
+1679            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
+1680
+1681        return f"{add_constraint} {expression_}"
+1682
+1683    def distinct_sql(self, expression: exp.Distinct) -> str:
+1684        this = self.expressions(expression, flat=True)
+1685        this = f" {this}" if this else ""
+1686
+1687        on = self.sql(expression, "on")
+1688        on = f" ON {on}" if on else ""
+1689        return f"DISTINCT{this}{on}"
+1690
+1691    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
+1692        return f"{self.sql(expression, 'this')} IGNORE NULLS"
+1693
+1694    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
+1695        return f"{self.sql(expression, 'this')} RESPECT NULLS"
+1696
+1697    def intdiv_sql(self, expression: exp.IntDiv) -> str:
+1698        return self.sql(
+1699            exp.Cast(
+1700                this=exp.Div(this=expression.this, expression=expression.expression),
+1701                to=exp.DataType(this=exp.DataType.Type.INT),
+1702            )
+1703        )
+1704
+1705    def dpipe_sql(self, expression: exp.DPipe) -> str:
+1706        return self.binary(expression, "||")
+1707
+1708    def div_sql(self, expression: exp.Div) -> str:
+1709        return self.binary(expression, "/")
+1710
+1711    def distance_sql(self, expression: exp.Distance) -> str:
+1712        return self.binary(expression, "<->")
+1713
+1714    def dot_sql(self, expression: exp.Dot) -> str:
+1715        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
+1716
+1717    def eq_sql(self, expression: exp.EQ) -> str:
+1718        return self.binary(expression, "=")
+1719
+1720    def escape_sql(self, expression: exp.Escape) -> str:
+1721        return self.binary(expression, "ESCAPE")
+1722
+1723    def glob_sql(self, expression: exp.Glob) -> str:
+1724        return self.binary(expression, "GLOB")
+1725
+1726    def gt_sql(self, expression: exp.GT) -> str:
+1727        return self.binary(expression, ">")
+1728
+1729    def gte_sql(self, expression: exp.GTE) -> str:
+1730        return self.binary(expression, ">=")
+1731
+1732    def ilike_sql(self, expression: exp.ILike) -> str:
+1733        return self.binary(expression, "ILIKE")
+1734
+1735    def is_sql(self, expression: exp.Is) -> str:
+1736        return self.binary(expression, "IS")
+1737
+1738    def like_sql(self, expression: exp.Like) -> str:
+1739        return self.binary(expression, "LIKE")
+1740
+1741    def similarto_sql(self, expression: exp.SimilarTo) -> str:
+1742        return self.binary(expression, "SIMILAR TO")
+1743
+1744    def lt_sql(self, expression: exp.LT) -> str:
+1745        return self.binary(expression, "<")
+1746
+1747    def lte_sql(self, expression: exp.LTE) -> str:
+1748        return self.binary(expression, "<=")
+1749
+1750    def mod_sql(self, expression: exp.Mod) -> str:
+1751        return self.binary(expression, "%")
+1752
+1753    def mul_sql(self, expression: exp.Mul) -> str:
+1754        return self.binary(expression, "*")
+1755
+1756    def neq_sql(self, expression: exp.NEQ) -> str:
+1757        return self.binary(expression, "<>")
+1758
+1759    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
+1760        return self.binary(expression, "IS NOT DISTINCT FROM")
+1761
+1762    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
+1763        return self.binary(expression, "IS DISTINCT FROM")
+1764
+1765    def or_sql(self, expression: exp.Or) -> str:
+1766        return self.connector_sql(expression, "OR")
+1767
+1768    def slice_sql(self, expression: exp.Slice) -> str:
+1769        return self.binary(expression, ":")
+1770
+1771    def sub_sql(self, expression: exp.Sub) -> str:
+1772        return self.binary(expression, "-")
+1773
+1774    def trycast_sql(self, expression: exp.TryCast) -> str:
+1775        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+1776
+1777    def use_sql(self, expression: exp.Use) -> str:
+1778        kind = self.sql(expression, "kind")
+1779        kind = f" {kind}" if kind else ""
+1780        this = self.sql(expression, "this")
+1781        this = f" {this}" if this else ""
+1782        return f"USE{kind}{this}"
+1783
+1784    def binary(self, expression: exp.Binary, op: str) -> str:
+1785        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
+1786
+1787    def function_fallback_sql(self, expression: exp.Func) -> str:
+1788        args = []
+1789        for arg_value in expression.args.values():
+1790            if isinstance(arg_value, list):
+1791                for value in arg_value:
+1792                    args.append(value)
+1793            else:
+1794                args.append(arg_value)
+1795
+1796        return f"{self.normalize_func(expression.sql_name())}({self.format_args(*args)})"
+1797
+1798    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
+1799        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
+1800        if self.pretty and self.text_width(arg_sqls) > self._max_text_width:
+1801            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
+1802        return ", ".join(arg_sqls)
+1803
+1804    def text_width(self, args: t.Iterable) -> int:
+1805        return sum(len(arg) for arg in args)
+1806
+1807    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
+1808        return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie)
+1809
+1810    def expressions(
+1811        self,
+1812        expression: exp.Expression,
+1813        key: t.Optional[str] = None,
+1814        flat: bool = False,
+1815        indent: bool = True,
+1816        sep: str = ", ",
+1817        prefix: str = "",
+1818    ) -> str:
+1819        expressions = expression.args.get(key or "expressions")
+1820
+1821        if not expressions:
+1822            return ""
+1823
+1824        if flat:
+1825            return sep.join(self.sql(e) for e in expressions)
+1826
+1827        num_sqls = len(expressions)
+1828
+1829        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
+1830        pad = " " * self.pad
+1831        stripped_sep = sep.strip()
+1832
+1833        result_sqls = []
+1834        for i, e in enumerate(expressions):
+1835            sql = self.sql(e, comment=False)
+1836            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
+1837
+1838            if self.pretty:
+1839                if self._leading_comma:
+1840                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
+1841                else:
+1842                    result_sqls.append(
+1843                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
+1844                    )
+1845            else:
+1846                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
+1847
+1848        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
+1849        return self.indent(result_sql, skip_first=False) if indent else result_sql
+1850
+1851    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
+1852        expressions_sql = self.expressions(expression, flat=flat)
+1853        if flat:
+1854            return f"{op} {expressions_sql}"
+1855        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
+1856
+1857    def naked_property(self, expression: exp.Property) -> str:
+1858        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
+1859        if not property_name:
+1860            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
+1861        return f"{property_name} {self.sql(expression, 'this')}"
+1862
+1863    def set_operation(self, expression: exp.Expression, op: str) -> str:
+1864        this = self.sql(expression, "this")
+1865        op = self.seg(op)
+1866        return self.query_modifiers(
+1867            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
+1868        )
+1869
+1870    def tag_sql(self, expression: exp.Tag) -> str:
+1871        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
+1872
+1873    def token_sql(self, token_type: TokenType) -> str:
+1874        return self.TOKEN_MAPPING.get(token_type, token_type.name)
+1875
+1876    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
+1877        this = self.sql(expression, "this")
+1878        expressions = self.no_identify(self.expressions, expression)
+1879        expressions = (
+1880            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
+1881        )
+1882        return f"{this}{expressions}"
+1883
+1884    def userdefinedfunctionkwarg_sql(self, expression: exp.UserDefinedFunctionKwarg) -> str:
+1885        this = self.sql(expression, "this")
+1886        kind = self.sql(expression, "kind")
+1887        return f"{this} {kind}"
+1888
+1889    def joinhint_sql(self, expression: exp.JoinHint) -> str:
+1890        this = self.sql(expression, "this")
+1891        expressions = self.expressions(expression, flat=True)
+1892        return f"{this}({expressions})"
+1893
+1894    def kwarg_sql(self, expression: exp.Kwarg) -> str:
+1895        return self.binary(expression, "=>")
+1896
+1897    def when_sql(self, expression: exp.When) -> str:
+1898        this = self.sql(expression, "this")
+1899        then_expression = expression.args.get("then")
+1900        if isinstance(then_expression, exp.Insert):
+1901            then = f"INSERT {self.sql(then_expression, 'this')}"
+1902            if "expression" in then_expression.args:
+1903                then += f" VALUES {self.sql(then_expression, 'expression')}"
+1904        elif isinstance(then_expression, exp.Update):
+1905            if isinstance(then_expression.args.get("expressions"), exp.Star):
+1906                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
+1907            else:
+1908                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
+1909        else:
+1910            then = self.sql(then_expression)
+1911        return f"WHEN {this} THEN {then}"
+1912
+1913    def merge_sql(self, expression: exp.Merge) -> str:
+1914        this = self.sql(expression, "this")
+1915        using = f"USING {self.sql(expression, 'using')}"
+1916        on = f"ON {self.sql(expression, 'on')}"
+1917        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
+
+ + +

Generator interprets the given syntax tree and produces a SQL string as an output.

+ +
Arguments:
+ +
    +
  • time_mapping (dict): the dictionary of custom time mappings in which the key +represents a python time format and the output the target time format
  • +
  • time_trie (trie): a trie of the time_mapping keys
  • +
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • +
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • +
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • +
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • +
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • +
  • identify (bool): if set to True all identifiers will be delimited by the corresponding +character.
  • +
  • normalize (bool): if set to True all identifiers will lower cased
  • +
  • string_escape (str): specifies a string escape character. Default: '.
  • +
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • +
  • pad (int): determines padding in a formatted string. Default: 2.
  • +
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • +
  • unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
  • +
  • normalize_functions (str): normalize function names, "upper", "lower", or None +Default: "upper"
  • +
  • alias_post_tablesample (bool): if the table alias comes after tablesample +Default: False
  • +
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters +unsupported expressions. Default ErrorLevel.WARN.
  • +
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. +This is only relevant if unsupported_level is ErrorLevel.RAISE. +Default: 3
  • +
  • leading_comma (bool): if the the comma is leading or trailing in select statements +Default: False
  • +
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. +The default is on the smaller end because the length only represents a segment and not the true +line length. +Default: 80
  • +
  • comments: Whether or not to preserve comments in the output SQL code. +Default: True
  • +
+
+ + +
+ +
+ + Generator( time_mapping=None, time_trie=None, pretty=None, quote_start=None, quote_end=None, identifier_start=None, identifier_end=None, identify=False, normalize=False, string_escape=None, identifier_escape=None, pad=2, indent=2, index_offset=0, unnest_column_only=False, alias_post_tablesample=False, normalize_functions='upper', unsupported_level=<ErrorLevel.WARN: 'WARN'>, null_ordering=None, max_unsupported=3, leading_comma=False, max_text_width=80, comments=True) + + + +
+ +
185    def __init__(
+186        self,
+187        time_mapping=None,
+188        time_trie=None,
+189        pretty=None,
+190        quote_start=None,
+191        quote_end=None,
+192        identifier_start=None,
+193        identifier_end=None,
+194        identify=False,
+195        normalize=False,
+196        string_escape=None,
+197        identifier_escape=None,
+198        pad=2,
+199        indent=2,
+200        index_offset=0,
+201        unnest_column_only=False,
+202        alias_post_tablesample=False,
+203        normalize_functions="upper",
+204        unsupported_level=ErrorLevel.WARN,
+205        null_ordering=None,
+206        max_unsupported=3,
+207        leading_comma=False,
+208        max_text_width=80,
+209        comments=True,
+210    ):
+211        import sqlglot
+212
+213        self.time_mapping = time_mapping or {}
+214        self.time_trie = time_trie
+215        self.pretty = pretty if pretty is not None else sqlglot.pretty
+216        self.quote_start = quote_start or "'"
+217        self.quote_end = quote_end or "'"
+218        self.identifier_start = identifier_start or '"'
+219        self.identifier_end = identifier_end or '"'
+220        self.identify = identify
+221        self.normalize = normalize
+222        self.string_escape = string_escape or "'"
+223        self.identifier_escape = identifier_escape or '"'
+224        self.pad = pad
+225        self.index_offset = index_offset
+226        self.unnest_column_only = unnest_column_only
+227        self.alias_post_tablesample = alias_post_tablesample
+228        self.normalize_functions = normalize_functions
+229        self.unsupported_level = unsupported_level
+230        self.unsupported_messages = []
+231        self.max_unsupported = max_unsupported
+232        self.null_ordering = null_ordering
+233        self._indent = indent
+234        self._replace_backslash = self.string_escape == "\\"
+235        self._escaped_quote_end = self.string_escape + self.quote_end
+236        self._escaped_identifier_end = self.identifier_escape + self.identifier_end
+237        self._leading_comma = leading_comma
+238        self._max_text_width = max_text_width
+239        self._comments = comments
+
+ + + + +
+
+ +
+ + def + generate(self, expression: Optional[sqlglot.expressions.Expression]) -> str: + + + +
+ +
241    def generate(self, expression: t.Optional[exp.Expression]) -> str:
+242        """
+243        Generates a SQL string by interpreting the given syntax tree.
+244
+245        Args
+246            expression: the syntax tree.
+247
+248        Returns
+249            the SQL string.
+250        """
+251        self.unsupported_messages = []
+252        sql = self.sql(expression).strip()
+253
+254        if self.unsupported_level == ErrorLevel.IGNORE:
+255            return sql
+256
+257        if self.unsupported_level == ErrorLevel.WARN:
+258            for msg in self.unsupported_messages:
+259                logger.warning(msg)
+260        elif self.unsupported_level == ErrorLevel.RAISE and self.unsupported_messages:
+261            raise UnsupportedError(concat_messages(self.unsupported_messages, self.max_unsupported))
+262
+263        if self.pretty:
+264            sql = sql.replace(self.SENTINEL_LINE_BREAK, "\n")
+265        return sql
+
+ + +

Generates a SQL string by interpreting the given syntax tree.

+ +

Args + expression: the syntax tree.

+ +

Returns + the SQL string.

+
+ + +
+
+ +
+ + def + unsupported(self, message: str) -> None: + + + +
+ +
267    def unsupported(self, message: str) -> None:
+268        if self.unsupported_level == ErrorLevel.IMMEDIATE:
+269            raise UnsupportedError(message)
+270        self.unsupported_messages.append(message)
+
+ + + + +
+
+ +
+ + def + sep(self, sep: str = ' ') -> str: + + + +
+ +
272    def sep(self, sep: str = " ") -> str:
+273        return f"{sep.strip()}\n" if self.pretty else sep
+
+ + + + +
+
+ +
+ + def + seg(self, sql: str, sep: str = ' ') -> str: + + + +
+ +
275    def seg(self, sql: str, sep: str = " ") -> str:
+276        return f"{self.sep(sep)}{sql}"
+
+ + + + +
+
+ +
+ + def + pad_comment(self, comment: str) -> str: + + + +
+ +
278    def pad_comment(self, comment: str) -> str:
+279        comment = " " + comment if comment[0].strip() else comment
+280        comment = comment + " " if comment[-1].strip() else comment
+281        return comment
+
+ + + + +
+
+ +
+ + def + maybe_comment(self, sql: str, expression: sqlglot.expressions.Expression) -> str: + + + +
+ +
283    def maybe_comment(self, sql: str, expression: exp.Expression) -> str:
+284        comments = expression.comments if self._comments else None
+285
+286        if not comments:
+287            return sql
+288
+289        sep = "\n" if self.pretty else " "
+290        comments_sql = sep.join(
+291            f"/*{self.pad_comment(comment)}*/" for comment in comments if comment
+292        )
+293
+294        if not comments_sql:
+295            return sql
+296
+297        if isinstance(expression, self.WITH_SEPARATED_COMMENTS):
+298            return f"{comments_sql}{self.sep()}{sql}"
+299
+300        return f"{sql} {comments_sql}"
+
+ + + + +
+
+ +
+ + def + wrap(self, expression: sqlglot.expressions.Expression | str) -> str: + + + +
+ +
302    def wrap(self, expression: exp.Expression | str) -> str:
+303        this_sql = self.indent(
+304            self.sql(expression)
+305            if isinstance(expression, (exp.Select, exp.Union))
+306            else self.sql(expression, "this"),
+307            level=1,
+308            pad=0,
+309        )
+310        return f"({self.sep('')}{this_sql}{self.seg(')', sep='')}"
+
+ + + + +
+
+ +
+ + def + no_identify(self, func: Callable[..., str], *args, **kwargs) -> str: + + + +
+ +
312    def no_identify(self, func: t.Callable[..., str], *args, **kwargs) -> str:
+313        original = self.identify
+314        self.identify = False
+315        result = func(*args, **kwargs)
+316        self.identify = original
+317        return result
+
+ + + + +
+
+ +
+ + def + normalize_func(self, name: str) -> str: + + + +
+ +
319    def normalize_func(self, name: str) -> str:
+320        if self.normalize_functions == "upper":
+321            return name.upper()
+322        if self.normalize_functions == "lower":
+323            return name.lower()
+324        return name
+
+ + + + +
+
+ +
+ + def + indent( self, sql: str, level: int = 0, pad: Optional[int] = None, skip_first: bool = False, skip_last: bool = False) -> str: + + + +
+ +
326    def indent(
+327        self,
+328        sql: str,
+329        level: int = 0,
+330        pad: t.Optional[int] = None,
+331        skip_first: bool = False,
+332        skip_last: bool = False,
+333    ) -> str:
+334        if not self.pretty:
+335            return sql
+336
+337        pad = self.pad if pad is None else pad
+338        lines = sql.split("\n")
+339
+340        return "\n".join(
+341            line
+342            if (skip_first and i == 0) or (skip_last and i == len(lines) - 1)
+343            else f"{' ' * (level * self._indent + pad)}{line}"
+344            for i, line in enumerate(lines)
+345        )
+
+ + + + +
+
+ +
+ + def + sql( self, expression: Union[str, sqlglot.expressions.Expression, NoneType], key: Optional[str] = None, comment: bool = True) -> str: + + + +
+ +
347    def sql(
+348        self,
+349        expression: t.Optional[str | exp.Expression],
+350        key: t.Optional[str] = None,
+351        comment: bool = True,
+352    ) -> str:
+353        if not expression:
+354            return ""
+355
+356        if isinstance(expression, str):
+357            return expression
+358
+359        if key:
+360            return self.sql(expression.args.get(key))
+361
+362        transform = self.TRANSFORMS.get(expression.__class__)
+363
+364        if callable(transform):
+365            sql = transform(self, expression)
+366        elif transform:
+367            sql = transform
+368        elif isinstance(expression, exp.Expression):
+369            exp_handler_name = f"{expression.key}_sql"
+370
+371            if hasattr(self, exp_handler_name):
+372                sql = getattr(self, exp_handler_name)(expression)
+373            elif isinstance(expression, exp.Func):
+374                sql = self.function_fallback_sql(expression)
+375            elif isinstance(expression, exp.Property):
+376                sql = self.property_sql(expression)
+377            else:
+378                raise ValueError(f"Unsupported expression type {expression.__class__.__name__}")
+379        else:
+380            raise ValueError(f"Expected an Expression. Received {type(expression)}: {expression}")
+381
+382        return self.maybe_comment(sql, expression) if self._comments and comment else sql
+
+ + + + +
+
+ +
+ + def + uncache_sql(self, expression: sqlglot.expressions.Uncache) -> str: + + + +
+ +
384    def uncache_sql(self, expression: exp.Uncache) -> str:
+385        table = self.sql(expression, "this")
+386        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
+387        return f"UNCACHE TABLE{exists_sql} {table}"
+
+ + + + +
+
+ +
+ + def + cache_sql(self, expression: sqlglot.expressions.Cache) -> str: + + + +
+ +
389    def cache_sql(self, expression: exp.Cache) -> str:
+390        lazy = " LAZY" if expression.args.get("lazy") else ""
+391        table = self.sql(expression, "this")
+392        options = expression.args.get("options")
+393        options = f" OPTIONS({self.sql(options[0])} = {self.sql(options[1])})" if options else ""
+394        sql = self.sql(expression, "expression")
+395        sql = f" AS{self.sep()}{sql}" if sql else ""
+396        sql = f"CACHE{lazy} TABLE {table}{options}{sql}"
+397        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + characterset_sql(self, expression: sqlglot.expressions.CharacterSet) -> str: + + + +
+ +
399    def characterset_sql(self, expression: exp.CharacterSet) -> str:
+400        if isinstance(expression.parent, exp.Cast):
+401            return f"CHAR CHARACTER SET {self.sql(expression, 'this')}"
+402        default = "DEFAULT " if expression.args.get("default") else ""
+403        return f"{default}CHARACTER SET={self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + column_sql(self, expression: sqlglot.expressions.Column) -> str: + + + +
+ +
405    def column_sql(self, expression: exp.Column) -> str:
+406        return ".".join(
+407            part
+408            for part in [
+409                self.sql(expression, "db"),
+410                self.sql(expression, "table"),
+411                self.sql(expression, "this"),
+412            ]
+413            if part
+414        )
+
+ + + + +
+
+ +
+ + def + columndef_sql(self, expression: sqlglot.expressions.ColumnDef) -> str: + + + +
+ +
416    def columndef_sql(self, expression: exp.ColumnDef) -> str:
+417        column = self.sql(expression, "this")
+418        kind = self.sql(expression, "kind")
+419        constraints = self.expressions(expression, key="constraints", sep=" ", flat=True)
+420        exists = "IF NOT EXISTS " if expression.args.get("exists") else ""
+421        kind = f" {kind}" if kind else ""
+422        constraints = f" {constraints}" if constraints else ""
+423
+424        return f"{exists}{column}{kind}{constraints}"
+
+ + + + +
+
+ +
+ + def + columnconstraint_sql(self, expression: sqlglot.expressions.ColumnConstraint) -> str: + + + +
+ +
426    def columnconstraint_sql(self, expression: exp.ColumnConstraint) -> str:
+427        this = self.sql(expression, "this")
+428        kind_sql = self.sql(expression, "kind")
+429        return f"CONSTRAINT {this} {kind_sql}" if this else kind_sql
+
+ + + + +
+
+ +
+ + def + autoincrementcolumnconstraint_sql(self, _) -> str: + + + +
+ +
431    def autoincrementcolumnconstraint_sql(self, _) -> str:
+432        return self.token_sql(TokenType.AUTO_INCREMENT)
+
+ + + + +
+
+ +
+ + def + checkcolumnconstraint_sql(self, expression: sqlglot.expressions.CheckColumnConstraint) -> str: + + + +
+ +
434    def checkcolumnconstraint_sql(self, expression: exp.CheckColumnConstraint) -> str:
+435        this = self.sql(expression, "this")
+436        return f"CHECK ({this})"
+
+ + + + +
+
+ +
+ + def + commentcolumnconstraint_sql(self, expression: sqlglot.expressions.CommentColumnConstraint) -> str: + + + +
+ +
438    def commentcolumnconstraint_sql(self, expression: exp.CommentColumnConstraint) -> str:
+439        comment = self.sql(expression, "this")
+440        return f"COMMENT {comment}"
+
+ + + + +
+
+ +
+ + def + collatecolumnconstraint_sql(self, expression: sqlglot.expressions.CollateColumnConstraint) -> str: + + + +
+ +
442    def collatecolumnconstraint_sql(self, expression: exp.CollateColumnConstraint) -> str:
+443        collate = self.sql(expression, "this")
+444        return f"COLLATE {collate}"
+
+ + + + +
+
+ +
+ + def + encodecolumnconstraint_sql(self, expression: sqlglot.expressions.EncodeColumnConstraint) -> str: + + + +
+ +
446    def encodecolumnconstraint_sql(self, expression: exp.EncodeColumnConstraint) -> str:
+447        encode = self.sql(expression, "this")
+448        return f"ENCODE {encode}"
+
+ + + + +
+
+ +
+ + def + defaultcolumnconstraint_sql(self, expression: sqlglot.expressions.DefaultColumnConstraint) -> str: + + + +
+ +
450    def defaultcolumnconstraint_sql(self, expression: exp.DefaultColumnConstraint) -> str:
+451        default = self.sql(expression, "this")
+452        return f"DEFAULT {default}"
+
+ + + + +
+
+ +
+ + def + generatedasidentitycolumnconstraint_sql( self, expression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str: + + + +
+ +
454    def generatedasidentitycolumnconstraint_sql(
+455        self, expression: exp.GeneratedAsIdentityColumnConstraint
+456    ) -> str:
+457        this = ""
+458        if expression.this is not None:
+459            this = " ALWAYS " if expression.this else " BY DEFAULT "
+460        start = expression.args.get("start")
+461        start = f"START WITH {start}" if start else ""
+462        increment = expression.args.get("increment")
+463        increment = f"INCREMENT BY {increment}" if increment else ""
+464        sequence_opts = ""
+465        if start or increment:
+466            sequence_opts = f"{start} {increment}"
+467            sequence_opts = f" ({sequence_opts.strip()})"
+468        return f"GENERATED{this}AS IDENTITY{sequence_opts}"
+
+ + + + +
+
+ +
+ + def + notnullcolumnconstraint_sql(self, expression: sqlglot.expressions.NotNullColumnConstraint) -> str: + + + +
+ +
470    def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str:
+471        return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL"
+
+ + + + +
+
+ +
+ + def + primarykeycolumnconstraint_sql(self, expression: sqlglot.expressions.PrimaryKeyColumnConstraint) -> str: + + + +
+ +
473    def primarykeycolumnconstraint_sql(self, expression: exp.PrimaryKeyColumnConstraint) -> str:
+474        desc = expression.args.get("desc")
+475        if desc is not None:
+476            return f"PRIMARY KEY{' DESC' if desc else ' ASC'}"
+477        return f"PRIMARY KEY"
+
+ + + + +
+
+ +
+ + def + uniquecolumnconstraint_sql(self, _) -> str: + + + +
+ +
479    def uniquecolumnconstraint_sql(self, _) -> str:
+480        return "UNIQUE"
+
+ + + + +
+
+ +
+ + def + create_sql(self, expression: sqlglot.expressions.Create) -> str: + + + +
+ +
482    def create_sql(self, expression: exp.Create) -> str:
+483        kind = self.sql(expression, "kind").upper()
+484        properties = expression.args.get("properties")
+485        properties_exp = expression.copy()
+486        properties_locs = self.locate_properties(properties) if properties else {}
+487        if properties_locs.get(exp.Properties.Location.POST_SCHEMA_ROOT) or properties_locs.get(
+488            exp.Properties.Location.POST_SCHEMA_WITH
+489        ):
+490            properties_exp.set(
+491                "properties",
+492                exp.Properties(
+493                    expressions=[
+494                        *properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT],
+495                        *properties_locs[exp.Properties.Location.POST_SCHEMA_WITH],
+496                    ]
+497                ),
+498            )
+499        if kind == "TABLE" and properties_locs.get(exp.Properties.Location.PRE_SCHEMA):
+500            this_name = self.sql(expression.this, "this")
+501            this_properties = self.properties(
+502                exp.Properties(expressions=properties_locs[exp.Properties.Location.PRE_SCHEMA]),
+503                wrapped=False,
+504            )
+505            this_schema = f"({self.expressions(expression.this)})"
+506            this = f"{this_name}, {this_properties} {this_schema}"
+507            properties_sql = ""
+508        else:
+509            this = self.sql(expression, "this")
+510            properties_sql = self.sql(properties_exp, "properties")
+511        begin = " BEGIN" if expression.args.get("begin") else ""
+512        expression_sql = self.sql(expression, "expression")
+513        if expression_sql:
+514            expression_sql = f"{begin}{self.sep()}{expression_sql}"
+515
+516            if self.CREATE_FUNCTION_AS or kind != "FUNCTION":
+517                expression_sql = f" AS{expression_sql}"
+518
+519        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+520        transient = (
+521            " TRANSIENT" if self.CREATE_TRANSIENT and expression.args.get("transient") else ""
+522        )
+523        external = " EXTERNAL" if expression.args.get("external") else ""
+524        replace = " OR REPLACE" if expression.args.get("replace") else ""
+525        exists_sql = " IF NOT EXISTS" if expression.args.get("exists") else ""
+526        unique = " UNIQUE" if expression.args.get("unique") else ""
+527        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
+528        set_ = " SET" if expression.args.get("set") else ""
+529        multiset = " MULTISET" if expression.args.get("multiset") else ""
+530        global_temporary = " GLOBAL TEMPORARY" if expression.args.get("global_temporary") else ""
+531        volatile = " VOLATILE" if expression.args.get("volatile") else ""
+532        data = expression.args.get("data")
+533        if data is None:
+534            data = ""
+535        elif data:
+536            data = " WITH DATA"
+537        else:
+538            data = " WITH NO DATA"
+539        statistics = expression.args.get("statistics")
+540        if statistics is None:
+541            statistics = ""
+542        elif statistics:
+543            statistics = " AND STATISTICS"
+544        else:
+545            statistics = " AND NO STATISTICS"
+546        no_primary_index = " NO PRIMARY INDEX" if expression.args.get("no_primary_index") else ""
+547
+548        indexes = expression.args.get("indexes")
+549        index_sql = ""
+550        if indexes:
+551            indexes_sql = []
+552            for index in indexes:
+553                ind_unique = " UNIQUE" if index.args.get("unique") else ""
+554                ind_primary = " PRIMARY" if index.args.get("primary") else ""
+555                ind_amp = " AMP" if index.args.get("amp") else ""
+556                ind_name = f" {index.name}" if index.name else ""
+557                ind_columns = (
+558                    f' ({self.expressions(index, key="columns", flat=True)})'
+559                    if index.args.get("columns")
+560                    else ""
+561                )
+562                if index.args.get("primary") and properties_locs.get(
+563                    exp.Properties.Location.POST_INDEX
+564                ):
+565                    postindex_props_sql = self.properties(
+566                        exp.Properties(
+567                            expressions=properties_locs[exp.Properties.Location.POST_INDEX]
+568                        ),
+569                        wrapped=False,
+570                    )
+571                    ind_columns = f"{ind_columns} {postindex_props_sql}"
+572
+573                indexes_sql.append(
+574                    f"{ind_unique}{ind_primary}{ind_amp} INDEX{ind_name}{ind_columns}"
+575                )
+576            index_sql = "".join(indexes_sql)
+577
+578        postcreate_props_sql = ""
+579        if properties_locs.get(exp.Properties.Location.POST_CREATE):
+580            postcreate_props_sql = self.properties(
+581                exp.Properties(expressions=properties_locs[exp.Properties.Location.POST_CREATE]),
+582                sep=" ",
+583                prefix=" ",
+584                wrapped=False,
+585            )
+586
+587        modifiers = "".join(
+588            (
+589                replace,
+590                temporary,
+591                transient,
+592                external,
+593                unique,
+594                materialized,
+595                set_,
+596                multiset,
+597                global_temporary,
+598                volatile,
+599                postcreate_props_sql,
+600            )
+601        )
+602        no_schema_binding = (
+603            " WITH NO SCHEMA BINDING" if expression.args.get("no_schema_binding") else ""
+604        )
+605
+606        post_expression_modifiers = "".join((data, statistics, no_primary_index))
+607
+608        expression_sql = f"CREATE{modifiers} {kind}{exists_sql} {this}{properties_sql}{expression_sql}{post_expression_modifiers}{index_sql}{no_schema_binding}"
+609        return self.prepend_ctes(expression, expression_sql)
+
+ + + + +
+
+ +
+ + def + describe_sql(self, expression: sqlglot.expressions.Describe) -> str: + + + +
+ +
611    def describe_sql(self, expression: exp.Describe) -> str:
+612        return f"DESCRIBE {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + prepend_ctes(self, expression: sqlglot.expressions.Expression, sql: str) -> str: + + + +
+ +
614    def prepend_ctes(self, expression: exp.Expression, sql: str) -> str:
+615        with_ = self.sql(expression, "with")
+616        if with_:
+617            sql = f"{with_}{self.sep()}{sql}"
+618        return sql
+
+ + + + +
+
+ +
+ + def + with_sql(self, expression: sqlglot.expressions.With) -> str: + + + +
+ +
620    def with_sql(self, expression: exp.With) -> str:
+621        sql = self.expressions(expression, flat=True)
+622        recursive = "RECURSIVE " if expression.args.get("recursive") else ""
+623
+624        return f"WITH {recursive}{sql}"
+
+ + + + +
+
+ +
+ + def + cte_sql(self, expression: sqlglot.expressions.CTE) -> str: + + + +
+ +
626    def cte_sql(self, expression: exp.CTE) -> str:
+627        alias = self.sql(expression, "alias")
+628        return f"{alias} AS {self.wrap(expression)}"
+
+ + + + +
+
+ +
+ + def + tablealias_sql(self, expression: sqlglot.expressions.TableAlias) -> str: + + + +
+ +
630    def tablealias_sql(self, expression: exp.TableAlias) -> str:
+631        alias = self.sql(expression, "this")
+632        columns = self.expressions(expression, key="columns", flat=True)
+633        columns = f"({columns})" if columns else ""
+634        return f"{alias}{columns}"
+
+ + + + +
+
+ +
+ + def + bitstring_sql(self, expression: sqlglot.expressions.BitString) -> str: + + + +
+ +
636    def bitstring_sql(self, expression: exp.BitString) -> str:
+637        return self.sql(expression, "this")
+
+ + + + +
+
+ +
+ + def + hexstring_sql(self, expression: sqlglot.expressions.HexString) -> str: + + + +
+ +
639    def hexstring_sql(self, expression: exp.HexString) -> str:
+640        return self.sql(expression, "this")
+
+ + + + +
+
+ +
+ + def + datatype_sql(self, expression: sqlglot.expressions.DataType) -> str: + + + +
+ +
642    def datatype_sql(self, expression: exp.DataType) -> str:
+643        type_value = expression.this
+644        type_sql = self.TYPE_MAPPING.get(type_value, type_value.value)
+645        nested = ""
+646        interior = self.expressions(expression, flat=True)
+647        values = ""
+648        if interior:
+649            if expression.args.get("nested"):
+650                nested = f"{self.STRUCT_DELIMITER[0]}{interior}{self.STRUCT_DELIMITER[1]}"
+651                if expression.args.get("values") is not None:
+652                    delimiters = ("[", "]") if type_value == exp.DataType.Type.ARRAY else ("(", ")")
+653                    values = (
+654                        f"{delimiters[0]}{self.expressions(expression, 'values')}{delimiters[1]}"
+655                    )
+656            else:
+657                nested = f"({interior})"
+658
+659        return f"{type_sql}{nested}{values}"
+
+ + + + +
+
+ +
+ + def + directory_sql(self, expression: sqlglot.expressions.Directory) -> str: + + + +
+ +
661    def directory_sql(self, expression: exp.Directory) -> str:
+662        local = "LOCAL " if expression.args.get("local") else ""
+663        row_format = self.sql(expression, "row_format")
+664        row_format = f" {row_format}" if row_format else ""
+665        return f"{local}DIRECTORY {self.sql(expression, 'this')}{row_format}"
+
+ + + + +
+
+ +
+ + def + delete_sql(self, expression: sqlglot.expressions.Delete) -> str: + + + +
+ +
667    def delete_sql(self, expression: exp.Delete) -> str:
+668        this = self.sql(expression, "this")
+669        this = f" FROM {this}" if this else ""
+670        using_sql = (
+671            f" USING {self.expressions(expression, 'using', sep=', USING ')}"
+672            if expression.args.get("using")
+673            else ""
+674        )
+675        where_sql = self.sql(expression, "where")
+676        sql = f"DELETE{this}{using_sql}{where_sql}"
+677        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + drop_sql(self, expression: sqlglot.expressions.Drop) -> str: + + + +
+ +
679    def drop_sql(self, expression: exp.Drop) -> str:
+680        this = self.sql(expression, "this")
+681        kind = expression.args["kind"]
+682        exists_sql = " IF EXISTS " if expression.args.get("exists") else " "
+683        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+684        materialized = " MATERIALIZED" if expression.args.get("materialized") else ""
+685        cascade = " CASCADE" if expression.args.get("cascade") else ""
+686        return f"DROP{temporary}{materialized} {kind}{exists_sql}{this}{cascade}"
+
+ + + + +
+
+ +
+ + def + except_sql(self, expression: sqlglot.expressions.Except) -> str: + + + +
+ +
688    def except_sql(self, expression: exp.Except) -> str:
+689        return self.prepend_ctes(
+690            expression,
+691            self.set_operation(expression, self.except_op(expression)),
+692        )
+
+ + + + +
+
+ +
+ + def + except_op(self, expression: sqlglot.expressions.Except) -> str: + + + +
+ +
694    def except_op(self, expression: exp.Except) -> str:
+695        return f"EXCEPT{'' if expression.args.get('distinct') else ' ALL'}"
+
+ + + + +
+
+ +
+ + def + fetch_sql(self, expression: sqlglot.expressions.Fetch) -> str: + + + +
+ +
697    def fetch_sql(self, expression: exp.Fetch) -> str:
+698        direction = expression.args.get("direction")
+699        direction = f" {direction.upper()}" if direction else ""
+700        count = expression.args.get("count")
+701        count = f" {count}" if count else ""
+702        return f"{self.seg('FETCH')}{direction}{count} ROWS ONLY"
+
+ + + + +
+
+ +
+ + def + filter_sql(self, expression: sqlglot.expressions.Filter) -> str: + + + +
+ +
704    def filter_sql(self, expression: exp.Filter) -> str:
+705        this = self.sql(expression, "this")
+706        where = self.sql(expression, "expression")[1:]  # where has a leading space
+707        return f"{this} FILTER({where})"
+
+ + + + +
+
+ +
+ + def + hint_sql(self, expression: sqlglot.expressions.Hint) -> str: + + + +
+ +
709    def hint_sql(self, expression: exp.Hint) -> str:
+710        if self.sql(expression, "this"):
+711            self.unsupported("Hints are not supported")
+712        return ""
+
+ + + + +
+
+ +
+ + def + index_sql(self, expression: sqlglot.expressions.Index) -> str: + + + +
+ +
714    def index_sql(self, expression: exp.Index) -> str:
+715        this = self.sql(expression, "this")
+716        table = self.sql(expression, "table")
+717        columns = self.sql(expression, "columns")
+718        return f"{this} ON {table} {columns}"
+
+ + + + +
+
+ +
+ + def + identifier_sql(self, expression: sqlglot.expressions.Identifier) -> str: + + + +
+ +
720    def identifier_sql(self, expression: exp.Identifier) -> str:
+721        text = expression.name
+722        text = text.lower() if self.normalize else text
+723        text = text.replace(self.identifier_end, self._escaped_identifier_end)
+724        if expression.args.get("quoted") or self.identify:
+725            text = f"{self.identifier_start}{text}{self.identifier_end}"
+726        return text
+
+ + + + +
+
+ +
+ + def + national_sql(self, expression: sqlglot.expressions.National) -> str: + + + +
+ +
728    def national_sql(self, expression: exp.National) -> str:
+729        return f"N{self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + partition_sql(self, expression: sqlglot.expressions.Partition) -> str: + + + +
+ +
731    def partition_sql(self, expression: exp.Partition) -> str:
+732        return f"PARTITION({self.expressions(expression)})"
+
+ + + + +
+
+ +
+ + def + properties_sql(self, expression: sqlglot.expressions.Properties) -> str: + + + +
+ +
734    def properties_sql(self, expression: exp.Properties) -> str:
+735        root_properties = []
+736        with_properties = []
+737
+738        for p in expression.expressions:
+739            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+740            if p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+741                with_properties.append(p)
+742            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+743                root_properties.append(p)
+744
+745        return self.root_properties(
+746            exp.Properties(expressions=root_properties)
+747        ) + self.with_properties(exp.Properties(expressions=with_properties))
+
+ + + + +
+
+ +
+ + def + root_properties(self, properties: sqlglot.expressions.Properties) -> str: + + + +
+ +
749    def root_properties(self, properties: exp.Properties) -> str:
+750        if properties.expressions:
+751            return self.sep() + self.expressions(properties, indent=False, sep=" ")
+752        return ""
+
+ + + + +
+
+ +
+ + def + properties( self, properties: sqlglot.expressions.Properties, prefix: str = '', sep: str = ', ', suffix: str = '', wrapped: bool = True) -> str: + + + +
+ +
754    def properties(
+755        self,
+756        properties: exp.Properties,
+757        prefix: str = "",
+758        sep: str = ", ",
+759        suffix: str = "",
+760        wrapped: bool = True,
+761    ) -> str:
+762        if properties.expressions:
+763            expressions = self.expressions(properties, sep=sep, indent=False)
+764            expressions = self.wrap(expressions) if wrapped else expressions
+765            return f"{prefix}{' ' if prefix and prefix != ' ' else ''}{expressions}{suffix}"
+766        return ""
+
+ + + + +
+
+ +
+ + def + with_properties(self, properties: sqlglot.expressions.Properties) -> str: + + + +
+ +
768    def with_properties(self, properties: exp.Properties) -> str:
+769        return self.properties(properties, prefix=self.seg("WITH"))
+
+ + + + +
+
+ +
+ + def + locate_properties( self, properties: sqlglot.expressions.Properties) -> Dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]: + + + +
+ +
771    def locate_properties(
+772        self, properties: exp.Properties
+773    ) -> t.Dict[exp.Properties.Location, list[exp.Property]]:
+774        properties_locs: t.Dict[exp.Properties.Location, list[exp.Property]] = {
+775            key: [] for key in exp.Properties.Location
+776        }
+777
+778        for p in properties.expressions:
+779            p_loc = self.PROPERTIES_LOCATION[p.__class__]
+780            if p_loc == exp.Properties.Location.PRE_SCHEMA:
+781                properties_locs[exp.Properties.Location.PRE_SCHEMA].append(p)
+782            elif p_loc == exp.Properties.Location.POST_INDEX:
+783                properties_locs[exp.Properties.Location.POST_INDEX].append(p)
+784            elif p_loc == exp.Properties.Location.POST_SCHEMA_ROOT:
+785                properties_locs[exp.Properties.Location.POST_SCHEMA_ROOT].append(p)
+786            elif p_loc == exp.Properties.Location.POST_SCHEMA_WITH:
+787                properties_locs[exp.Properties.Location.POST_SCHEMA_WITH].append(p)
+788            elif p_loc == exp.Properties.Location.POST_CREATE:
+789                properties_locs[exp.Properties.Location.POST_CREATE].append(p)
+790            elif p_loc == exp.Properties.Location.UNSUPPORTED:
+791                self.unsupported(f"Unsupported property {p.key}")
+792
+793        return properties_locs
+
+ + + + +
+
+ +
+ + def + property_sql(self, expression: sqlglot.expressions.Property) -> str: + + + +
+ +
795    def property_sql(self, expression: exp.Property) -> str:
+796        property_cls = expression.__class__
+797        if property_cls == exp.Property:
+798            return f"{expression.name}={self.sql(expression, 'value')}"
+799
+800        property_name = exp.Properties.PROPERTY_TO_NAME.get(property_cls)
+801        if not property_name:
+802            self.unsupported(f"Unsupported property {expression.key}")
+803
+804        return f"{property_name}={self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + likeproperty_sql(self, expression: sqlglot.expressions.LikeProperty) -> str: + + + +
+ +
806    def likeproperty_sql(self, expression: exp.LikeProperty) -> str:
+807        options = " ".join(f"{e.name} {self.sql(e, 'value')}" for e in expression.expressions)
+808        options = f" {options}" if options else ""
+809        return f"LIKE {self.sql(expression, 'this')}{options}"
+
+ + + + +
+
+ +
+ + def + fallbackproperty_sql(self, expression: sqlglot.expressions.FallbackProperty) -> str: + + + +
+ +
811    def fallbackproperty_sql(self, expression: exp.FallbackProperty) -> str:
+812        no = "NO " if expression.args.get("no") else ""
+813        protection = " PROTECTION" if expression.args.get("protection") else ""
+814        return f"{no}FALLBACK{protection}"
+
+ + + + +
+
+ +
+ + def + journalproperty_sql(self, expression: sqlglot.expressions.JournalProperty) -> str: + + + +
+ +
816    def journalproperty_sql(self, expression: exp.JournalProperty) -> str:
+817        no = "NO " if expression.args.get("no") else ""
+818        dual = "DUAL " if expression.args.get("dual") else ""
+819        before = "BEFORE " if expression.args.get("before") else ""
+820        return f"{no}{dual}{before}JOURNAL"
+
+ + + + +
+
+ +
+ + def + freespaceproperty_sql(self, expression: sqlglot.expressions.FreespaceProperty) -> str: + + + +
+ +
822    def freespaceproperty_sql(self, expression: exp.FreespaceProperty) -> str:
+823        freespace = self.sql(expression, "this")
+824        percent = " PERCENT" if expression.args.get("percent") else ""
+825        return f"FREESPACE={freespace}{percent}"
+
+ + + + +
+
+ +
+ + def + afterjournalproperty_sql(self, expression: sqlglot.expressions.AfterJournalProperty) -> str: + + + +
+ +
827    def afterjournalproperty_sql(self, expression: exp.AfterJournalProperty) -> str:
+828        no = "NO " if expression.args.get("no") else ""
+829        dual = "DUAL " if expression.args.get("dual") else ""
+830        local = ""
+831        if expression.args.get("local") is not None:
+832            local = "LOCAL " if expression.args.get("local") else "NOT LOCAL "
+833        return f"{no}{dual}{local}AFTER JOURNAL"
+
+ + + + +
+
+ +
+ + def + checksumproperty_sql(self, expression: sqlglot.expressions.ChecksumProperty) -> str: + + + +
+ +
835    def checksumproperty_sql(self, expression: exp.ChecksumProperty) -> str:
+836        if expression.args.get("default"):
+837            property = "DEFAULT"
+838        elif expression.args.get("on"):
+839            property = "ON"
+840        else:
+841            property = "OFF"
+842        return f"CHECKSUM={property}"
+
+ + + + +
+
+ +
+ + def + mergeblockratioproperty_sql(self, expression: sqlglot.expressions.MergeBlockRatioProperty) -> str: + + + +
+ +
844    def mergeblockratioproperty_sql(self, expression: exp.MergeBlockRatioProperty) -> str:
+845        if expression.args.get("no"):
+846            return "NO MERGEBLOCKRATIO"
+847        if expression.args.get("default"):
+848            return "DEFAULT MERGEBLOCKRATIO"
+849
+850        percent = " PERCENT" if expression.args.get("percent") else ""
+851        return f"MERGEBLOCKRATIO={self.sql(expression, 'this')}{percent}"
+
+ + + + +
+
+ +
+ + def + datablocksizeproperty_sql(self, expression: sqlglot.expressions.DataBlocksizeProperty) -> str: + + + +
+ +
853    def datablocksizeproperty_sql(self, expression: exp.DataBlocksizeProperty) -> str:
+854        default = expression.args.get("default")
+855        min = expression.args.get("min")
+856        if default is not None or min is not None:
+857            if default:
+858                property = "DEFAULT"
+859            elif min:
+860                property = "MINIMUM"
+861            else:
+862                property = "MAXIMUM"
+863            return f"{property} DATABLOCKSIZE"
+864        else:
+865            units = expression.args.get("units")
+866            units = f" {units}" if units else ""
+867            return f"DATABLOCKSIZE={self.sql(expression, 'size')}{units}"
+
+ + + + +
+
+ +
+ + def + blockcompressionproperty_sql(self, expression: sqlglot.expressions.BlockCompressionProperty) -> str: + + + +
+ +
869    def blockcompressionproperty_sql(self, expression: exp.BlockCompressionProperty) -> str:
+870        autotemp = expression.args.get("autotemp")
+871        always = expression.args.get("always")
+872        default = expression.args.get("default")
+873        manual = expression.args.get("manual")
+874        never = expression.args.get("never")
+875
+876        if autotemp is not None:
+877            property = f"AUTOTEMP({self.expressions(autotemp)})"
+878        elif always:
+879            property = "ALWAYS"
+880        elif default:
+881            property = "DEFAULT"
+882        elif manual:
+883            property = "MANUAL"
+884        elif never:
+885            property = "NEVER"
+886        return f"BLOCKCOMPRESSION={property}"
+
+ + + + +
+
+ +
+ + def + isolatedloadingproperty_sql(self, expression: sqlglot.expressions.IsolatedLoadingProperty) -> str: + + + +
+ +
888    def isolatedloadingproperty_sql(self, expression: exp.IsolatedLoadingProperty) -> str:
+889        no = expression.args.get("no")
+890        no = " NO" if no else ""
+891        concurrent = expression.args.get("concurrent")
+892        concurrent = " CONCURRENT" if concurrent else ""
+893
+894        for_ = ""
+895        if expression.args.get("for_all"):
+896            for_ = " FOR ALL"
+897        elif expression.args.get("for_insert"):
+898            for_ = " FOR INSERT"
+899        elif expression.args.get("for_none"):
+900            for_ = " FOR NONE"
+901        return f"WITH{no}{concurrent} ISOLATED LOADING{for_}"
+
+ + + + +
+
+ +
+ + def + insert_sql(self, expression: sqlglot.expressions.Insert) -> str: + + + +
+ +
903    def insert_sql(self, expression: exp.Insert) -> str:
+904        overwrite = expression.args.get("overwrite")
+905
+906        if isinstance(expression.this, exp.Directory):
+907            this = "OVERWRITE " if overwrite else "INTO "
+908        else:
+909            this = "OVERWRITE TABLE " if overwrite else "INTO "
+910
+911        this = f"{this}{self.sql(expression, 'this')}"
+912        exists = " IF EXISTS " if expression.args.get("exists") else " "
+913        partition_sql = (
+914            self.sql(expression, "partition") if expression.args.get("partition") else ""
+915        )
+916        expression_sql = self.sql(expression, "expression")
+917        sep = self.sep() if partition_sql else ""
+918        sql = f"INSERT {this}{exists}{partition_sql}{sep}{expression_sql}"
+919        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + intersect_sql(self, expression: sqlglot.expressions.Intersect) -> str: + + + +
+ +
921    def intersect_sql(self, expression: exp.Intersect) -> str:
+922        return self.prepend_ctes(
+923            expression,
+924            self.set_operation(expression, self.intersect_op(expression)),
+925        )
+
+ + + + +
+
+ +
+ + def + intersect_op(self, expression: sqlglot.expressions.Intersect) -> str: + + + +
+ +
927    def intersect_op(self, expression: exp.Intersect) -> str:
+928        return f"INTERSECT{'' if expression.args.get('distinct') else ' ALL'}"
+
+ + + + +
+
+ +
+ + def + introducer_sql(self, expression: sqlglot.expressions.Introducer) -> str: + + + +
+ +
930    def introducer_sql(self, expression: exp.Introducer) -> str:
+931        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + pseudotype_sql(self, expression: sqlglot.expressions.PseudoType) -> str: + + + +
+ +
933    def pseudotype_sql(self, expression: exp.PseudoType) -> str:
+934        return expression.name.upper()
+
+ + + + +
+
+ +
+ + def + rowformatdelimitedproperty_sql(self, expression: sqlglot.expressions.RowFormatDelimitedProperty) -> str: + + + +
+ +
936    def rowformatdelimitedproperty_sql(self, expression: exp.RowFormatDelimitedProperty) -> str:
+937        fields = expression.args.get("fields")
+938        fields = f" FIELDS TERMINATED BY {fields}" if fields else ""
+939        escaped = expression.args.get("escaped")
+940        escaped = f" ESCAPED BY {escaped}" if escaped else ""
+941        items = expression.args.get("collection_items")
+942        items = f" COLLECTION ITEMS TERMINATED BY {items}" if items else ""
+943        keys = expression.args.get("map_keys")
+944        keys = f" MAP KEYS TERMINATED BY {keys}" if keys else ""
+945        lines = expression.args.get("lines")
+946        lines = f" LINES TERMINATED BY {lines}" if lines else ""
+947        null = expression.args.get("null")
+948        null = f" NULL DEFINED AS {null}" if null else ""
+949        return f"ROW FORMAT DELIMITED{fields}{escaped}{items}{keys}{lines}{null}"
+
+ + + + +
+
+ +
+ + def + table_sql(self, expression: sqlglot.expressions.Table, sep: str = ' AS ') -> str: + + + +
+ +
951    def table_sql(self, expression: exp.Table, sep: str = " AS ") -> str:
+952        table = ".".join(
+953            part
+954            for part in [
+955                self.sql(expression, "catalog"),
+956                self.sql(expression, "db"),
+957                self.sql(expression, "this"),
+958            ]
+959            if part
+960        )
+961
+962        alias = self.sql(expression, "alias")
+963        alias = f"{sep}{alias}" if alias else ""
+964        hints = self.expressions(expression, key="hints", sep=", ", flat=True)
+965        hints = f" WITH ({hints})" if hints else ""
+966        laterals = self.expressions(expression, key="laterals", sep="")
+967        joins = self.expressions(expression, key="joins", sep="")
+968        pivots = self.expressions(expression, key="pivots", sep="")
+969        system_time = expression.args.get("system_time")
+970        system_time = f" {self.sql(expression, 'system_time')}" if system_time else ""
+971
+972        if alias and pivots:
+973            pivots = f"{pivots}{alias}"
+974            alias = ""
+975
+976        return f"{table}{system_time}{alias}{hints}{laterals}{joins}{pivots}"
+
+ + + + +
+
+ +
+ + def + tablesample_sql(self, expression: sqlglot.expressions.TableSample) -> str: + + + +
+ +
978    def tablesample_sql(self, expression: exp.TableSample) -> str:
+979        if self.alias_post_tablesample and expression.this.alias:
+980            this = self.sql(expression.this, "this")
+981            alias = f" AS {self.sql(expression.this, 'alias')}"
+982        else:
+983            this = self.sql(expression, "this")
+984            alias = ""
+985        method = self.sql(expression, "method")
+986        method = f" {method.upper()} " if method else ""
+987        numerator = self.sql(expression, "bucket_numerator")
+988        denominator = self.sql(expression, "bucket_denominator")
+989        field = self.sql(expression, "bucket_field")
+990        field = f" ON {field}" if field else ""
+991        bucket = f"BUCKET {numerator} OUT OF {denominator}{field}" if numerator else ""
+992        percent = self.sql(expression, "percent")
+993        percent = f"{percent} PERCENT" if percent else ""
+994        rows = self.sql(expression, "rows")
+995        rows = f"{rows} ROWS" if rows else ""
+996        size = self.sql(expression, "size")
+997        seed = self.sql(expression, "seed")
+998        seed = f" SEED ({seed})" if seed else ""
+999        return f"{this} TABLESAMPLE{method}({bucket}{percent}{rows}{size}){seed}{alias}"
+
+ + + + +
+
+ +
+ + def + pivot_sql(self, expression: sqlglot.expressions.Pivot) -> str: + + + +
+ +
1001    def pivot_sql(self, expression: exp.Pivot) -> str:
+1002        this = self.sql(expression, "this")
+1003        unpivot = expression.args.get("unpivot")
+1004        direction = "UNPIVOT" if unpivot else "PIVOT"
+1005        expressions = self.expressions(expression, key="expressions")
+1006        field = self.sql(expression, "field")
+1007        return f"{this} {direction}({expressions} FOR {field})"
+
+ + + + +
+
+ +
+ + def + tuple_sql(self, expression: sqlglot.expressions.Tuple) -> str: + + + +
+ +
1009    def tuple_sql(self, expression: exp.Tuple) -> str:
+1010        return f"({self.expressions(expression, flat=True)})"
+
+ + + + +
+
+ +
+ + def + update_sql(self, expression: sqlglot.expressions.Update) -> str: + + + +
+ +
1012    def update_sql(self, expression: exp.Update) -> str:
+1013        this = self.sql(expression, "this")
+1014        set_sql = self.expressions(expression, flat=True)
+1015        from_sql = self.sql(expression, "from")
+1016        where_sql = self.sql(expression, "where")
+1017        sql = f"UPDATE {this} SET {set_sql}{from_sql}{where_sql}"
+1018        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + values_sql(self, expression: sqlglot.expressions.Values) -> str: + + + +
+ +
1020    def values_sql(self, expression: exp.Values) -> str:
+1021        args = self.expressions(expression)
+1022        alias = self.sql(expression, "alias")
+1023        values = f"VALUES{self.seg('')}{args}"
+1024        values = (
+1025            f"({values})"
+1026            if self.WRAP_DERIVED_VALUES and (alias or isinstance(expression.parent, exp.From))
+1027            else values
+1028        )
+1029        return f"{values} AS {alias}" if alias else values
+
+ + + + +
+
+ +
+ + def + var_sql(self, expression: sqlglot.expressions.Var) -> str: + + + +
+ +
1031    def var_sql(self, expression: exp.Var) -> str:
+1032        return self.sql(expression, "this")
+
+ + + + +
+
+ +
+ + def + into_sql(self, expression: sqlglot.expressions.Into) -> str: + + + +
+ +
1034    def into_sql(self, expression: exp.Into) -> str:
+1035        temporary = " TEMPORARY" if expression.args.get("temporary") else ""
+1036        unlogged = " UNLOGGED" if expression.args.get("unlogged") else ""
+1037        return f"{self.seg('INTO')}{temporary or unlogged} {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + from_sql(self, expression: sqlglot.expressions.From) -> str: + + + +
+ +
1039    def from_sql(self, expression: exp.From) -> str:
+1040        expressions = self.expressions(expression, flat=True)
+1041        return f"{self.seg('FROM')} {expressions}"
+
+ + + + +
+
+ +
+ + def + group_sql(self, expression: sqlglot.expressions.Group) -> str: + + + +
+ +
1043    def group_sql(self, expression: exp.Group) -> str:
+1044        group_by = self.op_expressions("GROUP BY", expression)
+1045        grouping_sets = self.expressions(expression, key="grouping_sets", indent=False)
+1046        grouping_sets = (
+1047            f"{self.seg('GROUPING SETS')} {self.wrap(grouping_sets)}" if grouping_sets else ""
+1048        )
+1049
+1050        cube = expression.args.get("cube")
+1051        if cube is True:
+1052            cube = self.seg("WITH CUBE")
+1053        else:
+1054            cube = self.expressions(expression, key="cube", indent=False)
+1055            cube = f"{self.seg('CUBE')} {self.wrap(cube)}" if cube else ""
+1056
+1057        rollup = expression.args.get("rollup")
+1058        if rollup is True:
+1059            rollup = self.seg("WITH ROLLUP")
+1060        else:
+1061            rollup = self.expressions(expression, key="rollup", indent=False)
+1062            rollup = f"{self.seg('ROLLUP')} {self.wrap(rollup)}" if rollup else ""
+1063
+1064        return f"{group_by}{csv(grouping_sets, cube, rollup, sep=',')}"
+
+ + + + +
+
+ +
+ + def + having_sql(self, expression: sqlglot.expressions.Having) -> str: + + + +
+ +
1066    def having_sql(self, expression: exp.Having) -> str:
+1067        this = self.indent(self.sql(expression, "this"))
+1068        return f"{self.seg('HAVING')}{self.sep()}{this}"
+
+ + + + +
+
+ +
+ + def + join_sql(self, expression: sqlglot.expressions.Join) -> str: + + + +
+ +
1070    def join_sql(self, expression: exp.Join) -> str:
+1071        op_sql = self.seg(
+1072            " ".join(
+1073                op
+1074                for op in (
+1075                    "NATURAL" if expression.args.get("natural") else None,
+1076                    expression.side,
+1077                    expression.kind,
+1078                    "JOIN",
+1079                )
+1080                if op
+1081            )
+1082        )
+1083        on_sql = self.sql(expression, "on")
+1084        using = expression.args.get("using")
+1085
+1086        if not on_sql and using:
+1087            on_sql = csv(*(self.sql(column) for column in using))
+1088
+1089        if on_sql:
+1090            on_sql = self.indent(on_sql, skip_first=True)
+1091            space = self.seg(" " * self.pad) if self.pretty else " "
+1092            if using:
+1093                on_sql = f"{space}USING ({on_sql})"
+1094            else:
+1095                on_sql = f"{space}ON {on_sql}"
+1096
+1097        expression_sql = self.sql(expression, "expression")
+1098        this_sql = self.sql(expression, "this")
+1099        return f"{expression_sql}{op_sql} {this_sql}{on_sql}"
+
+ + + + +
+
+ +
+ + def + lambda_sql( self, expression: sqlglot.expressions.Lambda, arrow_sep: str = '->') -> str: + + + +
+ +
1101    def lambda_sql(self, expression: exp.Lambda, arrow_sep: str = "->") -> str:
+1102        args = self.expressions(expression, flat=True)
+1103        args = f"({args})" if len(args.split(",")) > 1 else args
+1104        return f"{args} {arrow_sep} {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + lateral_sql(self, expression: sqlglot.expressions.Lateral) -> str: + + + +
+ +
1106    def lateral_sql(self, expression: exp.Lateral) -> str:
+1107        this = self.sql(expression, "this")
+1108
+1109        if isinstance(expression.this, exp.Subquery):
+1110            return f"LATERAL {this}"
+1111
+1112        if expression.args.get("view"):
+1113            alias = expression.args["alias"]
+1114            columns = self.expressions(alias, key="columns", flat=True)
+1115            table = f" {alias.name}" if alias.name else ""
+1116            columns = f" AS {columns}" if columns else ""
+1117            op_sql = self.seg(f"LATERAL VIEW{' OUTER' if expression.args.get('outer') else ''}")
+1118            return f"{op_sql}{self.sep()}{this}{table}{columns}"
+1119
+1120        alias = self.sql(expression, "alias")
+1121        alias = f" AS {alias}" if alias else ""
+1122        return f"LATERAL {this}{alias}"
+
+ + + + +
+
+ +
+ + def + limit_sql(self, expression: sqlglot.expressions.Limit) -> str: + + + +
+ +
1124    def limit_sql(self, expression: exp.Limit) -> str:
+1125        this = self.sql(expression, "this")
+1126        return f"{this}{self.seg('LIMIT')} {self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + offset_sql(self, expression: sqlglot.expressions.Offset) -> str: + + + +
+ +
1128    def offset_sql(self, expression: exp.Offset) -> str:
+1129        this = self.sql(expression, "this")
+1130        return f"{this}{self.seg('OFFSET')} {self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + lock_sql(self, expression: sqlglot.expressions.Lock) -> str: + + + +
+ +
1132    def lock_sql(self, expression: exp.Lock) -> str:
+1133        if self.LOCKING_READS_SUPPORTED:
+1134            lock_type = "UPDATE" if expression.args["update"] else "SHARE"
+1135            return self.seg(f"FOR {lock_type}")
+1136
+1137        self.unsupported("Locking reads using 'FOR UPDATE/SHARE' are not supported")
+1138        return ""
+
+ + + + +
+
+ +
+ + def + literal_sql(self, expression: sqlglot.expressions.Literal) -> str: + + + +
+ +
1140    def literal_sql(self, expression: exp.Literal) -> str:
+1141        text = expression.this or ""
+1142        if expression.is_string:
+1143            if self._replace_backslash:
+1144                text = BACKSLASH_RE.sub(r"\\\\", text)
+1145            text = text.replace(self.quote_end, self._escaped_quote_end)
+1146            if self.pretty:
+1147                text = text.replace("\n", self.SENTINEL_LINE_BREAK)
+1148            text = f"{self.quote_start}{text}{self.quote_end}"
+1149        return text
+
+ + + + +
+
+ +
+ + def + loaddata_sql(self, expression: sqlglot.expressions.LoadData) -> str: + + + +
+ +
1151    def loaddata_sql(self, expression: exp.LoadData) -> str:
+1152        local = " LOCAL" if expression.args.get("local") else ""
+1153        inpath = f" INPATH {self.sql(expression, 'inpath')}"
+1154        overwrite = " OVERWRITE" if expression.args.get("overwrite") else ""
+1155        this = f" INTO TABLE {self.sql(expression, 'this')}"
+1156        partition = self.sql(expression, "partition")
+1157        partition = f" {partition}" if partition else ""
+1158        input_format = self.sql(expression, "input_format")
+1159        input_format = f" INPUTFORMAT {input_format}" if input_format else ""
+1160        serde = self.sql(expression, "serde")
+1161        serde = f" SERDE {serde}" if serde else ""
+1162        return f"LOAD DATA{local}{inpath}{overwrite}{this}{partition}{input_format}{serde}"
+
+ + + + +
+
+ +
+ + def + null_sql(self, *_) -> str: + + + +
+ +
1164    def null_sql(self, *_) -> str:
+1165        return "NULL"
+
+ + + + +
+
+ +
+ + def + boolean_sql(self, expression: sqlglot.expressions.Boolean) -> str: + + + +
+ +
1167    def boolean_sql(self, expression: exp.Boolean) -> str:
+1168        return "TRUE" if expression.this else "FALSE"
+
+ + + + +
+
+ +
+ + def + order_sql(self, expression: sqlglot.expressions.Order, flat: bool = False) -> str: + + + +
+ +
1170    def order_sql(self, expression: exp.Order, flat: bool = False) -> str:
+1171        this = self.sql(expression, "this")
+1172        this = f"{this} " if this else this
+1173        return self.op_expressions(f"{this}ORDER BY", expression, flat=this or flat)  # type: ignore
+
+ + + + +
+
+ +
+ + def + cluster_sql(self, expression: sqlglot.expressions.Cluster) -> str: + + + +
+ +
1175    def cluster_sql(self, expression: exp.Cluster) -> str:
+1176        return self.op_expressions("CLUSTER BY", expression)
+
+ + + + +
+
+ +
+ + def + distribute_sql(self, expression: sqlglot.expressions.Distribute) -> str: + + + +
+ +
1178    def distribute_sql(self, expression: exp.Distribute) -> str:
+1179        return self.op_expressions("DISTRIBUTE BY", expression)
+
+ + + + +
+
+ +
+ + def + sort_sql(self, expression: sqlglot.expressions.Sort) -> str: + + + +
+ +
1181    def sort_sql(self, expression: exp.Sort) -> str:
+1182        return self.op_expressions("SORT BY", expression)
+
+ + + + +
+
+ +
+ + def + ordered_sql(self, expression: sqlglot.expressions.Ordered) -> str: + + + +
+ +
1184    def ordered_sql(self, expression: exp.Ordered) -> str:
+1185        desc = expression.args.get("desc")
+1186        asc = not desc
+1187
+1188        nulls_first = expression.args.get("nulls_first")
+1189        nulls_last = not nulls_first
+1190        nulls_are_large = self.null_ordering == "nulls_are_large"
+1191        nulls_are_small = self.null_ordering == "nulls_are_small"
+1192        nulls_are_last = self.null_ordering == "nulls_are_last"
+1193
+1194        sort_order = " DESC" if desc else ""
+1195        nulls_sort_change = ""
+1196        if nulls_first and (
+1197            (asc and nulls_are_large) or (desc and nulls_are_small) or nulls_are_last
+1198        ):
+1199            nulls_sort_change = " NULLS FIRST"
+1200        elif (
+1201            nulls_last
+1202            and ((asc and nulls_are_small) or (desc and nulls_are_large))
+1203            and not nulls_are_last
+1204        ):
+1205            nulls_sort_change = " NULLS LAST"
+1206
+1207        if nulls_sort_change and not self.NULL_ORDERING_SUPPORTED:
+1208            self.unsupported(
+1209                "Sorting in an ORDER BY on NULLS FIRST/NULLS LAST is not supported by this dialect"
+1210            )
+1211            nulls_sort_change = ""
+1212
+1213        return f"{self.sql(expression, 'this')}{sort_order}{nulls_sort_change}"
+
+ + + + +
+
+ +
+ + def + matchrecognize_sql(self, expression: sqlglot.expressions.MatchRecognize) -> str: + + + +
+ +
1215    def matchrecognize_sql(self, expression: exp.MatchRecognize) -> str:
+1216        partition = self.partition_by_sql(expression)
+1217        order = self.sql(expression, "order")
+1218        measures = self.sql(expression, "measures")
+1219        measures = self.seg(f"MEASURES {measures}") if measures else ""
+1220        rows = self.sql(expression, "rows")
+1221        rows = self.seg(rows) if rows else ""
+1222        after = self.sql(expression, "after")
+1223        after = self.seg(after) if after else ""
+1224        pattern = self.sql(expression, "pattern")
+1225        pattern = self.seg(f"PATTERN ({pattern})") if pattern else ""
+1226        define = self.sql(expression, "define")
+1227        define = self.seg(f"DEFINE {define}") if define else ""
+1228        body = "".join(
+1229            (
+1230                partition,
+1231                order,
+1232                measures,
+1233                rows,
+1234                after,
+1235                pattern,
+1236                define,
+1237            )
+1238        )
+1239        return f"{self.seg('MATCH_RECOGNIZE')} {self.wrap(body)}"
+
+ + + + +
+
+ +
+ + def + query_modifiers(self, expression: sqlglot.expressions.Expression, *sqls: str) -> str: + + + +
+ +
1241    def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
+1242        return csv(
+1243            *sqls,
+1244            *[self.sql(sql) for sql in expression.args.get("joins") or []],
+1245            self.sql(expression, "match"),
+1246            *[self.sql(sql) for sql in expression.args.get("laterals") or []],
+1247            self.sql(expression, "where"),
+1248            self.sql(expression, "group"),
+1249            self.sql(expression, "having"),
+1250            self.sql(expression, "qualify"),
+1251            self.seg("WINDOW ") + self.expressions(expression, "windows", flat=True)
+1252            if expression.args.get("windows")
+1253            else "",
+1254            self.sql(expression, "distribute"),
+1255            self.sql(expression, "sort"),
+1256            self.sql(expression, "cluster"),
+1257            self.sql(expression, "order"),
+1258            self.sql(expression, "limit"),
+1259            self.sql(expression, "offset"),
+1260            self.sql(expression, "lock"),
+1261            sep="",
+1262        )
+
+ + + + +
+
+ +
+ + def + select_sql(self, expression: sqlglot.expressions.Select) -> str: + + + +
+ +
1264    def select_sql(self, expression: exp.Select) -> str:
+1265        hint = self.sql(expression, "hint")
+1266        distinct = self.sql(expression, "distinct")
+1267        distinct = f" {distinct}" if distinct else ""
+1268        expressions = self.expressions(expression)
+1269        expressions = f"{self.sep()}{expressions}" if expressions else expressions
+1270        sql = self.query_modifiers(
+1271            expression,
+1272            f"SELECT{hint}{distinct}{expressions}",
+1273            self.sql(expression, "into", comment=False),
+1274            self.sql(expression, "from", comment=False),
+1275        )
+1276        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + schema_sql(self, expression: sqlglot.expressions.Schema) -> str: + + + +
+ +
1278    def schema_sql(self, expression: exp.Schema) -> str:
+1279        this = self.sql(expression, "this")
+1280        this = f"{this} " if this else ""
+1281        sql = f"({self.sep('')}{self.expressions(expression)}{self.seg(')', sep='')}"
+1282        return f"{this}{sql}"
+
+ + + + +
+
+ +
+ + def + star_sql(self, expression: sqlglot.expressions.Star) -> str: + + + +
+ +
1284    def star_sql(self, expression: exp.Star) -> str:
+1285        except_ = self.expressions(expression, key="except", flat=True)
+1286        except_ = f"{self.seg(self.STAR_MAPPING['except'])} ({except_})" if except_ else ""
+1287        replace = self.expressions(expression, key="replace", flat=True)
+1288        replace = f"{self.seg(self.STAR_MAPPING['replace'])} ({replace})" if replace else ""
+1289        return f"*{except_}{replace}"
+
+ + + + +
+
+ +
+ + def + structkwarg_sql(self, expression: sqlglot.expressions.StructKwarg) -> str: + + + +
+ +
1291    def structkwarg_sql(self, expression: exp.StructKwarg) -> str:
+1292        return f"{self.sql(expression, 'this')} {self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + parameter_sql(self, expression: sqlglot.expressions.Parameter) -> str: + + + +
+ +
1294    def parameter_sql(self, expression: exp.Parameter) -> str:
+1295        return f"@{self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + sessionparameter_sql(self, expression: sqlglot.expressions.SessionParameter) -> str: + + + +
+ +
1297    def sessionparameter_sql(self, expression: exp.SessionParameter) -> str:
+1298        this = self.sql(expression, "this")
+1299        kind = expression.text("kind")
+1300        if kind:
+1301            kind = f"{kind}."
+1302        return f"@@{kind}{this}"
+
+ + + + +
+
+ +
+ + def + placeholder_sql(self, expression: sqlglot.expressions.Placeholder) -> str: + + + +
+ +
1304    def placeholder_sql(self, expression: exp.Placeholder) -> str:
+1305        return f":{expression.name}" if expression.name else "?"
+
+ + + + +
+
+ +
+ + def + subquery_sql(self, expression: sqlglot.expressions.Subquery) -> str: + + + +
+ +
1307    def subquery_sql(self, expression: exp.Subquery) -> str:
+1308        alias = self.sql(expression, "alias")
+1309
+1310        sql = self.query_modifiers(
+1311            expression,
+1312            self.wrap(expression),
+1313            self.expressions(expression, key="pivots", sep=" "),
+1314            f" AS {alias}" if alias else "",
+1315        )
+1316
+1317        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + qualify_sql(self, expression: sqlglot.expressions.Qualify) -> str: + + + +
+ +
1319    def qualify_sql(self, expression: exp.Qualify) -> str:
+1320        this = self.indent(self.sql(expression, "this"))
+1321        return f"{self.seg('QUALIFY')}{self.sep()}{this}"
+
+ + + + +
+
+ +
+ + def + union_sql(self, expression: sqlglot.expressions.Union) -> str: + + + +
+ +
1323    def union_sql(self, expression: exp.Union) -> str:
+1324        return self.prepend_ctes(
+1325            expression,
+1326            self.set_operation(expression, self.union_op(expression)),
+1327        )
+
+ + + + +
+
+ +
+ + def + union_op(self, expression: sqlglot.expressions.Union) -> str: + + + +
+ +
1329    def union_op(self, expression: exp.Union) -> str:
+1330        kind = " DISTINCT" if self.EXPLICIT_UNION else ""
+1331        kind = kind if expression.args.get("distinct") else " ALL"
+1332        return f"UNION{kind}"
+
+ + + + +
+
+ +
+ + def + unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str: + + + +
+ +
1334    def unnest_sql(self, expression: exp.Unnest) -> str:
+1335        args = self.expressions(expression, flat=True)
+1336        alias = expression.args.get("alias")
+1337        if alias and self.unnest_column_only:
+1338            columns = alias.columns
+1339            alias = self.sql(columns[0]) if columns else ""
+1340        else:
+1341            alias = self.sql(expression, "alias")
+1342        alias = f" AS {alias}" if alias else alias
+1343        ordinality = " WITH ORDINALITY" if expression.args.get("ordinality") else ""
+1344        offset = expression.args.get("offset")
+1345        offset = f" WITH OFFSET AS {self.sql(offset)}" if offset else ""
+1346        return f"UNNEST({args}){ordinality}{alias}{offset}"
+
+ + + + +
+
+ +
+ + def + where_sql(self, expression: sqlglot.expressions.Where) -> str: + + + +
+ +
1348    def where_sql(self, expression: exp.Where) -> str:
+1349        this = self.indent(self.sql(expression, "this"))
+1350        return f"{self.seg('WHERE')}{self.sep()}{this}"
+
+ + + + +
+
+ +
+ + def + window_sql(self, expression: sqlglot.expressions.Window) -> str: + + + +
+ +
1352    def window_sql(self, expression: exp.Window) -> str:
+1353        this = self.sql(expression, "this")
+1354
+1355        partition = self.partition_by_sql(expression)
+1356
+1357        order = expression.args.get("order")
+1358        order_sql = self.order_sql(order, flat=True) if order else ""
+1359
+1360        partition_sql = partition + " " if partition and order else partition
+1361
+1362        spec = expression.args.get("spec")
+1363        spec_sql = " " + self.window_spec_sql(spec) if spec else ""
+1364
+1365        alias = self.sql(expression, "alias")
+1366        this = f"{this} {'AS' if expression.arg_key == 'windows' else 'OVER'}"
+1367
+1368        if not partition and not order and not spec and alias:
+1369            return f"{this} {alias}"
+1370
+1371        window_args = alias + partition_sql + order_sql + spec_sql
+1372
+1373        return f"{this} ({window_args.strip()})"
+
+ + + + +
+
+ +
+ + def + partition_by_sql( self, expression: sqlglot.expressions.Window | sqlglot.expressions.MatchRecognize) -> str: + + + +
+ +
1375    def partition_by_sql(self, expression: exp.Window | exp.MatchRecognize) -> str:
+1376        partition = self.expressions(expression, key="partition_by", flat=True)
+1377        return f"PARTITION BY {partition}" if partition else ""
+
+ + + + +
+
+ +
+ + def + window_spec_sql(self, expression: sqlglot.expressions.WindowSpec) -> str: + + + +
+ +
1379    def window_spec_sql(self, expression: exp.WindowSpec) -> str:
+1380        kind = self.sql(expression, "kind")
+1381        start = csv(self.sql(expression, "start"), self.sql(expression, "start_side"), sep=" ")
+1382        end = (
+1383            csv(self.sql(expression, "end"), self.sql(expression, "end_side"), sep=" ")
+1384            or "CURRENT ROW"
+1385        )
+1386        return f"{kind} BETWEEN {start} AND {end}"
+
+ + + + +
+
+ +
+ + def + withingroup_sql(self, expression: sqlglot.expressions.WithinGroup) -> str: + + + +
+ +
1388    def withingroup_sql(self, expression: exp.WithinGroup) -> str:
+1389        this = self.sql(expression, "this")
+1390        expression_sql = self.sql(expression, "expression")[1:]  # order has a leading space
+1391        return f"{this} WITHIN GROUP ({expression_sql})"
+
+ + + + +
+
+ +
+ + def + between_sql(self, expression: sqlglot.expressions.Between) -> str: + + + +
+ +
1393    def between_sql(self, expression: exp.Between) -> str:
+1394        this = self.sql(expression, "this")
+1395        low = self.sql(expression, "low")
+1396        high = self.sql(expression, "high")
+1397        return f"{this} BETWEEN {low} AND {high}"
+
+ + + + +
+
+ +
+ + def + bracket_sql(self, expression: sqlglot.expressions.Bracket) -> str: + + + +
+ +
1399    def bracket_sql(self, expression: exp.Bracket) -> str:
+1400        expressions = apply_index_offset(expression.expressions, self.index_offset)
+1401        expressions_sql = ", ".join(self.sql(e) for e in expressions)
+1402
+1403        return f"{self.sql(expression, 'this')}[{expressions_sql}]"
+
+ + + + +
+
+ +
+ + def + all_sql(self, expression: sqlglot.expressions.All) -> str: + + + +
+ +
1405    def all_sql(self, expression: exp.All) -> str:
+1406        return f"ALL {self.wrap(expression)}"
+
+ + + + +
+
+ +
+ + def + any_sql(self, expression: sqlglot.expressions.Any) -> str: + + + +
+ +
1408    def any_sql(self, expression: exp.Any) -> str:
+1409        return f"ANY {self.wrap(expression)}"
+
+ + + + +
+
+ +
+ + def + exists_sql(self, expression: sqlglot.expressions.Exists) -> str: + + + +
+ +
1411    def exists_sql(self, expression: exp.Exists) -> str:
+1412        return f"EXISTS{self.wrap(expression)}"
+
+ + + + +
+
+ +
+ + def + case_sql(self, expression: sqlglot.expressions.Case) -> str: + + + +
+ +
1414    def case_sql(self, expression: exp.Case) -> str:
+1415        this = self.sql(expression, "this")
+1416        statements = [f"CASE {this}" if this else "CASE"]
+1417
+1418        for e in expression.args["ifs"]:
+1419            statements.append(f"WHEN {self.sql(e, 'this')}")
+1420            statements.append(f"THEN {self.sql(e, 'true')}")
+1421
+1422        default = self.sql(expression, "default")
+1423
+1424        if default:
+1425            statements.append(f"ELSE {default}")
+1426
+1427        statements.append("END")
+1428
+1429        if self.pretty and self.text_width(statements) > self._max_text_width:
+1430            return self.indent("\n".join(statements), skip_first=True, skip_last=True)
+1431
+1432        return " ".join(statements)
+
+ + + + +
+
+ +
+ + def + constraint_sql(self, expression: sqlglot.expressions.Constraint) -> str: + + + +
+ +
1434    def constraint_sql(self, expression: exp.Constraint) -> str:
+1435        this = self.sql(expression, "this")
+1436        expressions = self.expressions(expression, flat=True)
+1437        return f"CONSTRAINT {this} {expressions}"
+
+ + + + +
+
+ +
+ + def + extract_sql(self, expression: sqlglot.expressions.Extract) -> str: + + + +
+ +
1439    def extract_sql(self, expression: exp.Extract) -> str:
+1440        this = self.sql(expression, "this")
+1441        expression_sql = self.sql(expression, "expression")
+1442        return f"EXTRACT({this} FROM {expression_sql})"
+
+ + + + +
+
+ +
+ + def + trim_sql(self, expression: sqlglot.expressions.Trim) -> str: + + + +
+ +
1444    def trim_sql(self, expression: exp.Trim) -> str:
+1445        trim_type = self.sql(expression, "position")
+1446
+1447        if trim_type == "LEADING":
+1448            return f"{self.normalize_func('LTRIM')}({self.format_args(expression.this)})"
+1449        elif trim_type == "TRAILING":
+1450            return f"{self.normalize_func('RTRIM')}({self.format_args(expression.this)})"
+1451        else:
+1452            return f"{self.normalize_func('TRIM')}({self.format_args(expression.this, expression.expression)})"
+
+ + + + +
+
+ +
+ + def + concat_sql(self, expression: sqlglot.expressions.Concat) -> str: + + + +
+ +
1454    def concat_sql(self, expression: exp.Concat) -> str:
+1455        if len(expression.expressions) == 1:
+1456            return self.sql(expression.expressions[0])
+1457        return self.function_fallback_sql(expression)
+
+ + + + +
+
+ +
+ + def + check_sql(self, expression: sqlglot.expressions.Check) -> str: + + + +
+ +
1459    def check_sql(self, expression: exp.Check) -> str:
+1460        this = self.sql(expression, key="this")
+1461        return f"CHECK ({this})"
+
+ + + + +
+
+ +
+ + def + foreignkey_sql(self, expression: sqlglot.expressions.ForeignKey) -> str: + + + +
+ +
1463    def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
+1464        expressions = self.expressions(expression, flat=True)
+1465        reference = self.sql(expression, "reference")
+1466        reference = f" {reference}" if reference else ""
+1467        delete = self.sql(expression, "delete")
+1468        delete = f" ON DELETE {delete}" if delete else ""
+1469        update = self.sql(expression, "update")
+1470        update = f" ON UPDATE {update}" if update else ""
+1471        return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
+
+ + + + +
+
+ +
+ + def + primarykey_sql(self, expression: sqlglot.expressions.ForeignKey) -> str: + + + +
+ +
1473    def primarykey_sql(self, expression: exp.ForeignKey) -> str:
+1474        expressions = self.expressions(expression, flat=True)
+1475        options = self.expressions(expression, "options", flat=True, sep=" ")
+1476        options = f" {options}" if options else ""
+1477        return f"PRIMARY KEY ({expressions}){options}"
+
+ + + + +
+
+ +
+ + def + unique_sql(self, expression: sqlglot.expressions.Unique) -> str: + + + +
+ +
1479    def unique_sql(self, expression: exp.Unique) -> str:
+1480        columns = self.expressions(expression, key="expressions")
+1481        return f"UNIQUE ({columns})"
+
+ + + + +
+
+ +
+ + def + if_sql(self, expression: sqlglot.expressions.If) -> str: + + + +
+ +
1483    def if_sql(self, expression: exp.If) -> str:
+1484        return self.case_sql(
+1485            exp.Case(ifs=[expression.copy()], default=expression.args.get("false"))
+1486        )
+
+ + + + +
+
+ +
+ + def + in_sql(self, expression: sqlglot.expressions.In) -> str: + + + +
+ +
1488    def in_sql(self, expression: exp.In) -> str:
+1489        query = expression.args.get("query")
+1490        unnest = expression.args.get("unnest")
+1491        field = expression.args.get("field")
+1492        is_global = " GLOBAL" if expression.args.get("is_global") else ""
+1493
+1494        if query:
+1495            in_sql = self.wrap(query)
+1496        elif unnest:
+1497            in_sql = self.in_unnest_op(unnest)
+1498        elif field:
+1499            in_sql = self.sql(field)
+1500        else:
+1501            in_sql = f"({self.expressions(expression, flat=True)})"
+1502
+1503        return f"{self.sql(expression, 'this')}{is_global} IN {in_sql}"
+
+ + + + +
+
+ +
+ + def + in_unnest_op(self, unnest: sqlglot.expressions.Unnest) -> str: + + + +
+ +
1505    def in_unnest_op(self, unnest: exp.Unnest) -> str:
+1506        return f"(SELECT {self.sql(unnest)})"
+
+ + + + +
+
+ +
+ + def + interval_sql(self, expression: sqlglot.expressions.Interval) -> str: + + + +
+ +
1508    def interval_sql(self, expression: exp.Interval) -> str:
+1509        this = expression.args.get("this")
+1510        if this:
+1511            this = (
+1512                f" {this}"
+1513                if isinstance(this, exp.Literal) or isinstance(this, exp.Paren)
+1514                else f" ({this})"
+1515            )
+1516        else:
+1517            this = ""
+1518        unit = expression.args.get("unit")
+1519        unit = f" {unit}" if unit else ""
+1520        return f"INTERVAL{this}{unit}"
+
+ + + + +
+
+ +
+ + def + return_sql(self, expression: sqlglot.expressions.Return) -> str: + + + +
+ +
1522    def return_sql(self, expression: exp.Return) -> str:
+1523        return f"RETURN {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + reference_sql(self, expression: sqlglot.expressions.Reference) -> str: + + + +
+ +
1525    def reference_sql(self, expression: exp.Reference) -> str:
+1526        this = self.sql(expression, "this")
+1527        expressions = self.expressions(expression, flat=True)
+1528        expressions = f"({expressions})" if expressions else ""
+1529        options = self.expressions(expression, "options", flat=True, sep=" ")
+1530        options = f" {options}" if options else ""
+1531        return f"REFERENCES {this}{expressions}{options}"
+
+ + + + +
+
+ +
+ + def + anonymous_sql(self, expression: sqlglot.expressions.Anonymous) -> str: + + + +
+ +
1533    def anonymous_sql(self, expression: exp.Anonymous) -> str:
+1534        args = self.format_args(*expression.expressions)
+1535        return f"{self.normalize_func(self.sql(expression, 'this'))}({args})"
+
+ + + + +
+
+ +
+ + def + paren_sql(self, expression: sqlglot.expressions.Paren) -> str: + + + +
+ +
1537    def paren_sql(self, expression: exp.Paren) -> str:
+1538        if isinstance(expression.unnest(), exp.Select):
+1539            sql = self.wrap(expression)
+1540        else:
+1541            sql = self.seg(self.indent(self.sql(expression, "this")), sep="")
+1542            sql = f"({sql}{self.seg(')', sep='')}"
+1543
+1544        return self.prepend_ctes(expression, sql)
+
+ + + + +
+
+ +
+ + def + neg_sql(self, expression: sqlglot.expressions.Neg) -> str: + + + +
+ +
1546    def neg_sql(self, expression: exp.Neg) -> str:
+1547        # This makes sure we don't convert "- - 5" to "--5", which is a comment
+1548        this_sql = self.sql(expression, "this")
+1549        sep = " " if this_sql[0] == "-" else ""
+1550        return f"-{sep}{this_sql}"
+
+ + + + +
+
+ +
+ + def + not_sql(self, expression: sqlglot.expressions.Not) -> str: + + + +
+ +
1552    def not_sql(self, expression: exp.Not) -> str:
+1553        return f"NOT {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + alias_sql(self, expression: sqlglot.expressions.Alias) -> str: + + + +
+ +
1555    def alias_sql(self, expression: exp.Alias) -> str:
+1556        to_sql = self.sql(expression, "alias")
+1557        to_sql = f" AS {to_sql}" if to_sql else ""
+1558        return f"{self.sql(expression, 'this')}{to_sql}"
+
+ + + + +
+
+ +
+ + def + aliases_sql(self, expression: sqlglot.expressions.Aliases) -> str: + + + +
+ +
1560    def aliases_sql(self, expression: exp.Aliases) -> str:
+1561        return f"{self.sql(expression, 'this')} AS ({self.expressions(expression, flat=True)})"
+
+ + + + +
+
+ +
+ + def + attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str: + + + +
+ +
1563    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
+1564        this = self.sql(expression, "this")
+1565        zone = self.sql(expression, "zone")
+1566        return f"{this} AT TIME ZONE {zone}"
+
+ + + + +
+
+ +
+ + def + add_sql(self, expression: sqlglot.expressions.Add) -> str: + + + +
+ +
1568    def add_sql(self, expression: exp.Add) -> str:
+1569        return self.binary(expression, "+")
+
+ + + + +
+
+ +
+ + def + and_sql(self, expression: sqlglot.expressions.And) -> str: + + + +
+ +
1571    def and_sql(self, expression: exp.And) -> str:
+1572        return self.connector_sql(expression, "AND")
+
+ + + + +
+
+ +
+ + def + connector_sql(self, expression: sqlglot.expressions.Connector, op: str) -> str: + + + +
+ +
1574    def connector_sql(self, expression: exp.Connector, op: str) -> str:
+1575        if not self.pretty:
+1576            return self.binary(expression, op)
+1577
+1578        sqls = tuple(self.sql(e) for e in expression.flatten(unnest=False))
+1579        sep = "\n" if self.text_width(sqls) > self._max_text_width else " "
+1580        return f"{sep}{op} ".join(sqls)
+
+ + + + +
+
+ +
+ + def + bitwiseand_sql(self, expression: sqlglot.expressions.BitwiseAnd) -> str: + + + +
+ +
1582    def bitwiseand_sql(self, expression: exp.BitwiseAnd) -> str:
+1583        return self.binary(expression, "&")
+
+ + + + +
+
+ +
+ + def + bitwiseleftshift_sql(self, expression: sqlglot.expressions.BitwiseLeftShift) -> str: + + + +
+ +
1585    def bitwiseleftshift_sql(self, expression: exp.BitwiseLeftShift) -> str:
+1586        return self.binary(expression, "<<")
+
+ + + + +
+
+ +
+ + def + bitwisenot_sql(self, expression: sqlglot.expressions.BitwiseNot) -> str: + + + +
+ +
1588    def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
+1589        return f"~{self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + bitwiseor_sql(self, expression: sqlglot.expressions.BitwiseOr) -> str: + + + +
+ +
1591    def bitwiseor_sql(self, expression: exp.BitwiseOr) -> str:
+1592        return self.binary(expression, "|")
+
+ + + + +
+
+ +
+ + def + bitwiserightshift_sql(self, expression: sqlglot.expressions.BitwiseRightShift) -> str: + + + +
+ +
1594    def bitwiserightshift_sql(self, expression: exp.BitwiseRightShift) -> str:
+1595        return self.binary(expression, ">>")
+
+ + + + +
+
+ +
+ + def + bitwisexor_sql(self, expression: sqlglot.expressions.BitwiseXor) -> str: + + + +
+ +
1597    def bitwisexor_sql(self, expression: exp.BitwiseXor) -> str:
+1598        return self.binary(expression, "^")
+
+ + + + +
+
+ +
+ + def + cast_sql(self, expression: sqlglot.expressions.Cast) -> str: + + + +
+ +
1600    def cast_sql(self, expression: exp.Cast) -> str:
+1601        return f"CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+
+ + + + +
+
+ +
+ + def + currentdate_sql(self, expression: sqlglot.expressions.CurrentDate) -> str: + + + +
+ +
1603    def currentdate_sql(self, expression: exp.CurrentDate) -> str:
+1604        zone = self.sql(expression, "this")
+1605        return f"CURRENT_DATE({zone})" if zone else "CURRENT_DATE"
+
+ + + + +
+
+ +
+ + def + collate_sql(self, expression: sqlglot.expressions.Collate) -> str: + + + +
+ +
1607    def collate_sql(self, expression: exp.Collate) -> str:
+1608        return self.binary(expression, "COLLATE")
+
+ + + + +
+
+ +
+ + def + command_sql(self, expression: sqlglot.expressions.Command) -> str: + + + +
+ +
1610    def command_sql(self, expression: exp.Command) -> str:
+1611        return f"{self.sql(expression, 'this').upper()} {expression.text('expression').strip()}"
+
+ + + + +
+
+ +
+ + def + transaction_sql(self, *_) -> str: + + + +
+ +
1613    def transaction_sql(self, *_) -> str:
+1614        return "BEGIN"
+
+ + + + +
+
+ +
+ + def + commit_sql(self, expression: sqlglot.expressions.Commit) -> str: + + + +
+ +
1616    def commit_sql(self, expression: exp.Commit) -> str:
+1617        chain = expression.args.get("chain")
+1618        if chain is not None:
+1619            chain = " AND CHAIN" if chain else " AND NO CHAIN"
+1620
+1621        return f"COMMIT{chain or ''}"
+
+ + + + +
+
+ +
+ + def + rollback_sql(self, expression: sqlglot.expressions.Rollback) -> str: + + + +
+ +
1623    def rollback_sql(self, expression: exp.Rollback) -> str:
+1624        savepoint = expression.args.get("savepoint")
+1625        savepoint = f" TO {savepoint}" if savepoint else ""
+1626        return f"ROLLBACK{savepoint}"
+
+ + + + +
+
+ +
+ + def + altercolumn_sql(self, expression: sqlglot.expressions.AlterColumn) -> str: + + + +
+ +
1628    def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
+1629        this = self.sql(expression, "this")
+1630
+1631        dtype = self.sql(expression, "dtype")
+1632        if dtype:
+1633            collate = self.sql(expression, "collate")
+1634            collate = f" COLLATE {collate}" if collate else ""
+1635            using = self.sql(expression, "using")
+1636            using = f" USING {using}" if using else ""
+1637            return f"ALTER COLUMN {this} TYPE {dtype}{collate}{using}"
+1638
+1639        default = self.sql(expression, "default")
+1640        if default:
+1641            return f"ALTER COLUMN {this} SET DEFAULT {default}"
+1642
+1643        if not expression.args.get("drop"):
+1644            self.unsupported("Unsupported ALTER COLUMN syntax")
+1645
+1646        return f"ALTER COLUMN {this} DROP DEFAULT"
+
+ + + + +
+
+ +
+ + def + renametable_sql(self, expression: sqlglot.expressions.RenameTable) -> str: + + + +
+ +
1648    def renametable_sql(self, expression: exp.RenameTable) -> str:
+1649        this = self.sql(expression, "this")
+1650        return f"RENAME TO {this}"
+
+ + + + +
+
+ +
+ + def + altertable_sql(self, expression: sqlglot.expressions.AlterTable) -> str: + + + +
+ +
1652    def altertable_sql(self, expression: exp.AlterTable) -> str:
+1653        actions = expression.args["actions"]
+1654
+1655        if isinstance(actions[0], exp.ColumnDef):
+1656            actions = self.expressions(expression, "actions", prefix="ADD COLUMN ")
+1657        elif isinstance(actions[0], exp.Schema):
+1658            actions = self.expressions(expression, "actions", prefix="ADD COLUMNS ")
+1659        elif isinstance(actions[0], exp.Delete):
+1660            actions = self.expressions(expression, "actions", flat=True)
+1661        else:
+1662            actions = self.expressions(expression, "actions")
+1663
+1664        exists = " IF EXISTS" if expression.args.get("exists") else ""
+1665        return f"ALTER TABLE{exists} {self.sql(expression, 'this')} {actions}"
+
+ + + + +
+
+ +
+ + def + droppartition_sql(self, expression: sqlglot.expressions.DropPartition) -> str: + + + +
+ +
1667    def droppartition_sql(self, expression: exp.DropPartition) -> str:
+1668        expressions = self.expressions(expression)
+1669        exists = " IF EXISTS " if expression.args.get("exists") else " "
+1670        return f"DROP{exists}{expressions}"
+
+ + + + +
+
+ +
+ + def + addconstraint_sql(self, expression: sqlglot.expressions.AddConstraint) -> str: + + + +
+ +
1672    def addconstraint_sql(self, expression: exp.AddConstraint) -> str:
+1673        this = self.sql(expression, "this")
+1674        expression_ = self.sql(expression, "expression")
+1675        add_constraint = f"ADD CONSTRAINT {this}" if this else "ADD"
+1676
+1677        enforced = expression.args.get("enforced")
+1678        if enforced is not None:
+1679            return f"{add_constraint} CHECK ({expression_}){' ENFORCED' if enforced else ''}"
+1680
+1681        return f"{add_constraint} {expression_}"
+
+ + + + +
+
+ +
+ + def + distinct_sql(self, expression: sqlglot.expressions.Distinct) -> str: + + + +
+ +
1683    def distinct_sql(self, expression: exp.Distinct) -> str:
+1684        this = self.expressions(expression, flat=True)
+1685        this = f" {this}" if this else ""
+1686
+1687        on = self.sql(expression, "on")
+1688        on = f" ON {on}" if on else ""
+1689        return f"DISTINCT{this}{on}"
+
+ + + + +
+
+ +
+ + def + ignorenulls_sql(self, expression: sqlglot.expressions.IgnoreNulls) -> str: + + + +
+ +
1691    def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
+1692        return f"{self.sql(expression, 'this')} IGNORE NULLS"
+
+ + + + +
+
+ +
+ + def + respectnulls_sql(self, expression: sqlglot.expressions.RespectNulls) -> str: + + + +
+ +
1694    def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
+1695        return f"{self.sql(expression, 'this')} RESPECT NULLS"
+
+ + + + +
+
+ +
+ + def + intdiv_sql(self, expression: sqlglot.expressions.IntDiv) -> str: + + + +
+ +
1697    def intdiv_sql(self, expression: exp.IntDiv) -> str:
+1698        return self.sql(
+1699            exp.Cast(
+1700                this=exp.Div(this=expression.this, expression=expression.expression),
+1701                to=exp.DataType(this=exp.DataType.Type.INT),
+1702            )
+1703        )
+
+ + + + +
+
+ +
+ + def + dpipe_sql(self, expression: sqlglot.expressions.DPipe) -> str: + + + +
+ +
1705    def dpipe_sql(self, expression: exp.DPipe) -> str:
+1706        return self.binary(expression, "||")
+
+ + + + +
+
+ +
+ + def + div_sql(self, expression: sqlglot.expressions.Div) -> str: + + + +
+ +
1708    def div_sql(self, expression: exp.Div) -> str:
+1709        return self.binary(expression, "/")
+
+ + + + +
+
+ +
+ + def + distance_sql(self, expression: sqlglot.expressions.Distance) -> str: + + + +
+ +
1711    def distance_sql(self, expression: exp.Distance) -> str:
+1712        return self.binary(expression, "<->")
+
+ + + + +
+
+ +
+ + def + dot_sql(self, expression: sqlglot.expressions.Dot) -> str: + + + +
+ +
1714    def dot_sql(self, expression: exp.Dot) -> str:
+1715        return f"{self.sql(expression, 'this')}.{self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + eq_sql(self, expression: sqlglot.expressions.EQ) -> str: + + + +
+ +
1717    def eq_sql(self, expression: exp.EQ) -> str:
+1718        return self.binary(expression, "=")
+
+ + + + +
+
+ +
+ + def + escape_sql(self, expression: sqlglot.expressions.Escape) -> str: + + + +
+ +
1720    def escape_sql(self, expression: exp.Escape) -> str:
+1721        return self.binary(expression, "ESCAPE")
+
+ + + + +
+
+ +
+ + def + glob_sql(self, expression: sqlglot.expressions.Glob) -> str: + + + +
+ +
1723    def glob_sql(self, expression: exp.Glob) -> str:
+1724        return self.binary(expression, "GLOB")
+
+ + + + +
+
+ +
+ + def + gt_sql(self, expression: sqlglot.expressions.GT) -> str: + + + +
+ +
1726    def gt_sql(self, expression: exp.GT) -> str:
+1727        return self.binary(expression, ">")
+
+ + + + +
+
+ +
+ + def + gte_sql(self, expression: sqlglot.expressions.GTE) -> str: + + + +
+ +
1729    def gte_sql(self, expression: exp.GTE) -> str:
+1730        return self.binary(expression, ">=")
+
+ + + + +
+
+ +
+ + def + ilike_sql(self, expression: sqlglot.expressions.ILike) -> str: + + + +
+ +
1732    def ilike_sql(self, expression: exp.ILike) -> str:
+1733        return self.binary(expression, "ILIKE")
+
+ + + + +
+
+ +
+ + def + is_sql(self, expression: sqlglot.expressions.Is) -> str: + + + +
+ +
1735    def is_sql(self, expression: exp.Is) -> str:
+1736        return self.binary(expression, "IS")
+
+ + + + +
+
+ +
+ + def + like_sql(self, expression: sqlglot.expressions.Like) -> str: + + + +
+ +
1738    def like_sql(self, expression: exp.Like) -> str:
+1739        return self.binary(expression, "LIKE")
+
+ + + + +
+
+ +
+ + def + similarto_sql(self, expression: sqlglot.expressions.SimilarTo) -> str: + + + +
+ +
1741    def similarto_sql(self, expression: exp.SimilarTo) -> str:
+1742        return self.binary(expression, "SIMILAR TO")
+
+ + + + +
+
+ +
+ + def + lt_sql(self, expression: sqlglot.expressions.LT) -> str: + + + +
+ +
1744    def lt_sql(self, expression: exp.LT) -> str:
+1745        return self.binary(expression, "<")
+
+ + + + +
+
+ +
+ + def + lte_sql(self, expression: sqlglot.expressions.LTE) -> str: + + + +
+ +
1747    def lte_sql(self, expression: exp.LTE) -> str:
+1748        return self.binary(expression, "<=")
+
+ + + + +
+
+ +
+ + def + mod_sql(self, expression: sqlglot.expressions.Mod) -> str: + + + +
+ +
1750    def mod_sql(self, expression: exp.Mod) -> str:
+1751        return self.binary(expression, "%")
+
+ + + + +
+
+ +
+ + def + mul_sql(self, expression: sqlglot.expressions.Mul) -> str: + + + +
+ +
1753    def mul_sql(self, expression: exp.Mul) -> str:
+1754        return self.binary(expression, "*")
+
+ + + + +
+
+ +
+ + def + neq_sql(self, expression: sqlglot.expressions.NEQ) -> str: + + + +
+ +
1756    def neq_sql(self, expression: exp.NEQ) -> str:
+1757        return self.binary(expression, "<>")
+
+ + + + +
+
+ +
+ + def + nullsafeeq_sql(self, expression: sqlglot.expressions.NullSafeEQ) -> str: + + + +
+ +
1759    def nullsafeeq_sql(self, expression: exp.NullSafeEQ) -> str:
+1760        return self.binary(expression, "IS NOT DISTINCT FROM")
+
+ + + + +
+
+ +
+ + def + nullsafeneq_sql(self, expression: sqlglot.expressions.NullSafeNEQ) -> str: + + + +
+ +
1762    def nullsafeneq_sql(self, expression: exp.NullSafeNEQ) -> str:
+1763        return self.binary(expression, "IS DISTINCT FROM")
+
+ + + + +
+
+ +
+ + def + or_sql(self, expression: sqlglot.expressions.Or) -> str: + + + +
+ +
1765    def or_sql(self, expression: exp.Or) -> str:
+1766        return self.connector_sql(expression, "OR")
+
+ + + + +
+
+ +
+ + def + slice_sql(self, expression: sqlglot.expressions.Slice) -> str: + + + +
+ +
1768    def slice_sql(self, expression: exp.Slice) -> str:
+1769        return self.binary(expression, ":")
+
+ + + + +
+
+ +
+ + def + sub_sql(self, expression: sqlglot.expressions.Sub) -> str: + + + +
+ +
1771    def sub_sql(self, expression: exp.Sub) -> str:
+1772        return self.binary(expression, "-")
+
+ + + + +
+
+ +
+ + def + trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str: + + + +
+ +
1774    def trycast_sql(self, expression: exp.TryCast) -> str:
+1775        return f"TRY_CAST({self.sql(expression, 'this')} AS {self.sql(expression, 'to')})"
+
+ + + + +
+
+ +
+ + def + use_sql(self, expression: sqlglot.expressions.Use) -> str: + + + +
+ +
1777    def use_sql(self, expression: exp.Use) -> str:
+1778        kind = self.sql(expression, "kind")
+1779        kind = f" {kind}" if kind else ""
+1780        this = self.sql(expression, "this")
+1781        this = f" {this}" if this else ""
+1782        return f"USE{kind}{this}"
+
+ + + + +
+
+ +
+ + def + binary(self, expression: sqlglot.expressions.Binary, op: str) -> str: + + + +
+ +
1784    def binary(self, expression: exp.Binary, op: str) -> str:
+1785        return f"{self.sql(expression, 'this')} {op} {self.sql(expression, 'expression')}"
+
+ + + + +
+
+ +
+ + def + function_fallback_sql(self, expression: sqlglot.expressions.Func) -> str: + + + +
+ +
1787    def function_fallback_sql(self, expression: exp.Func) -> str:
+1788        args = []
+1789        for arg_value in expression.args.values():
+1790            if isinstance(arg_value, list):
+1791                for value in arg_value:
+1792                    args.append(value)
+1793            else:
+1794                args.append(arg_value)
+1795
+1796        return f"{self.normalize_func(expression.sql_name())}({self.format_args(*args)})"
+
+ + + + +
+
+ +
+ + def + format_args(self, *args: Union[str, sqlglot.expressions.Expression, NoneType]) -> str: + + + +
+ +
1798    def format_args(self, *args: t.Optional[str | exp.Expression]) -> str:
+1799        arg_sqls = tuple(self.sql(arg) for arg in args if arg is not None)
+1800        if self.pretty and self.text_width(arg_sqls) > self._max_text_width:
+1801            return self.indent("\n" + f",\n".join(arg_sqls) + "\n", skip_first=True, skip_last=True)
+1802        return ", ".join(arg_sqls)
+
+ + + + +
+
+ +
+ + def + text_width(self, args: Iterable) -> int: + + + +
+ +
1804    def text_width(self, args: t.Iterable) -> int:
+1805        return sum(len(arg) for arg in args)
+
+ + + + +
+
+ +
+ + def + format_time(self, expression: sqlglot.expressions.Expression) -> Optional[str]: + + + +
+ +
1807    def format_time(self, expression: exp.Expression) -> t.Optional[str]:
+1808        return format_time(self.sql(expression, "format"), self.time_mapping, self.time_trie)
+
+ + + + +
+
+ +
+ + def + expressions( self, expression: sqlglot.expressions.Expression, key: Optional[str] = None, flat: bool = False, indent: bool = True, sep: str = ', ', prefix: str = '') -> str: + + + +
+ +
1810    def expressions(
+1811        self,
+1812        expression: exp.Expression,
+1813        key: t.Optional[str] = None,
+1814        flat: bool = False,
+1815        indent: bool = True,
+1816        sep: str = ", ",
+1817        prefix: str = "",
+1818    ) -> str:
+1819        expressions = expression.args.get(key or "expressions")
+1820
+1821        if not expressions:
+1822            return ""
+1823
+1824        if flat:
+1825            return sep.join(self.sql(e) for e in expressions)
+1826
+1827        num_sqls = len(expressions)
+1828
+1829        # These are calculated once in case we have the leading_comma / pretty option set, correspondingly
+1830        pad = " " * self.pad
+1831        stripped_sep = sep.strip()
+1832
+1833        result_sqls = []
+1834        for i, e in enumerate(expressions):
+1835            sql = self.sql(e, comment=False)
+1836            comments = self.maybe_comment("", e) if isinstance(e, exp.Expression) else ""
+1837
+1838            if self.pretty:
+1839                if self._leading_comma:
+1840                    result_sqls.append(f"{sep if i > 0 else pad}{prefix}{sql}{comments}")
+1841                else:
+1842                    result_sqls.append(
+1843                        f"{prefix}{sql}{stripped_sep if i + 1 < num_sqls else ''}{comments}"
+1844                    )
+1845            else:
+1846                result_sqls.append(f"{prefix}{sql}{comments}{sep if i + 1 < num_sqls else ''}")
+1847
+1848        result_sql = "\n".join(result_sqls) if self.pretty else "".join(result_sqls)
+1849        return self.indent(result_sql, skip_first=False) if indent else result_sql
+
+ + + + +
+
+ +
+ + def + op_expressions( self, op: str, expression: sqlglot.expressions.Expression, flat: bool = False) -> str: + + + +
+ +
1851    def op_expressions(self, op: str, expression: exp.Expression, flat: bool = False) -> str:
+1852        expressions_sql = self.expressions(expression, flat=flat)
+1853        if flat:
+1854            return f"{op} {expressions_sql}"
+1855        return f"{self.seg(op)}{self.sep() if expressions_sql else ''}{expressions_sql}"
+
+ + + + +
+
+ +
+ + def + naked_property(self, expression: sqlglot.expressions.Property) -> str: + + + +
+ +
1857    def naked_property(self, expression: exp.Property) -> str:
+1858        property_name = exp.Properties.PROPERTY_TO_NAME.get(expression.__class__)
+1859        if not property_name:
+1860            self.unsupported(f"Unsupported property {expression.__class__.__name__}")
+1861        return f"{property_name} {self.sql(expression, 'this')}"
+
+ + + + +
+
+ +
+ + def + set_operation(self, expression: sqlglot.expressions.Expression, op: str) -> str: + + + +
+ +
1863    def set_operation(self, expression: exp.Expression, op: str) -> str:
+1864        this = self.sql(expression, "this")
+1865        op = self.seg(op)
+1866        return self.query_modifiers(
+1867            expression, f"{this}{op}{self.sep()}{self.sql(expression, 'expression')}"
+1868        )
+
+ + + + +
+
+ +
+ + def + tag_sql(self, expression: sqlglot.expressions.Tag) -> str: + + + +
+ +
1870    def tag_sql(self, expression: exp.Tag) -> str:
+1871        return f"{expression.args.get('prefix')}{self.sql(expression.this)}{expression.args.get('postfix')}"
+
+ + + + +
+
+ +
+ + def + token_sql(self, token_type: sqlglot.tokens.TokenType) -> str: + + + +
+ +
1873    def token_sql(self, token_type: TokenType) -> str:
+1874        return self.TOKEN_MAPPING.get(token_type, token_type.name)
+
+ + + + +
+
+ +
+ + def + userdefinedfunction_sql(self, expression: sqlglot.expressions.UserDefinedFunction) -> str: + + + +
+ +
1876    def userdefinedfunction_sql(self, expression: exp.UserDefinedFunction) -> str:
+1877        this = self.sql(expression, "this")
+1878        expressions = self.no_identify(self.expressions, expression)
+1879        expressions = (
+1880            self.wrap(expressions) if expression.args.get("wrapped") else f" {expressions}"
+1881        )
+1882        return f"{this}{expressions}"
+
+ + + + +
+
+ +
+ + def + userdefinedfunctionkwarg_sql(self, expression: sqlglot.expressions.UserDefinedFunctionKwarg) -> str: + + + +
+ +
1884    def userdefinedfunctionkwarg_sql(self, expression: exp.UserDefinedFunctionKwarg) -> str:
+1885        this = self.sql(expression, "this")
+1886        kind = self.sql(expression, "kind")
+1887        return f"{this} {kind}"
+
+ + + + +
+
+ +
+ + def + joinhint_sql(self, expression: sqlglot.expressions.JoinHint) -> str: + + + +
+ +
1889    def joinhint_sql(self, expression: exp.JoinHint) -> str:
+1890        this = self.sql(expression, "this")
+1891        expressions = self.expressions(expression, flat=True)
+1892        return f"{this}({expressions})"
+
+ + + + +
+
+ +
+ + def + kwarg_sql(self, expression: sqlglot.expressions.Kwarg) -> str: + + + +
+ +
1894    def kwarg_sql(self, expression: exp.Kwarg) -> str:
+1895        return self.binary(expression, "=>")
+
+ + + + +
+
+ +
+ + def + when_sql(self, expression: sqlglot.expressions.When) -> str: + + + +
+ +
1897    def when_sql(self, expression: exp.When) -> str:
+1898        this = self.sql(expression, "this")
+1899        then_expression = expression.args.get("then")
+1900        if isinstance(then_expression, exp.Insert):
+1901            then = f"INSERT {self.sql(then_expression, 'this')}"
+1902            if "expression" in then_expression.args:
+1903                then += f" VALUES {self.sql(then_expression, 'expression')}"
+1904        elif isinstance(then_expression, exp.Update):
+1905            if isinstance(then_expression.args.get("expressions"), exp.Star):
+1906                then = f"UPDATE {self.sql(then_expression, 'expressions')}"
+1907            else:
+1908                then = f"UPDATE SET {self.expressions(then_expression, flat=True)}"
+1909        else:
+1910            then = self.sql(then_expression)
+1911        return f"WHEN {this} THEN {then}"
+
+ + + + +
+
+ +
+ + def + merge_sql(self, expression: sqlglot.expressions.Merge) -> str: + + + +
+ +
1913    def merge_sql(self, expression: exp.Merge) -> str:
+1914        this = self.sql(expression, "this")
+1915        using = f"USING {self.sql(expression, 'using')}"
+1916        on = f"ON {self.sql(expression, 'on')}"
+1917        return f"MERGE INTO {this} {using} {on} {self.expressions(expression, sep=' ')}"
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/helper.html b/docs/sqlglot/helper.html new file mode 100644 index 0000000..bf8994f --- /dev/null +++ b/docs/sqlglot/helper.html @@ -0,0 +1,1651 @@ + + + + + + + sqlglot.helper API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.helper

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import inspect
+  4import logging
+  5import re
+  6import sys
+  7import typing as t
+  8from collections.abc import Collection
+  9from contextlib import contextmanager
+ 10from copy import copy
+ 11from enum import Enum
+ 12
+ 13if t.TYPE_CHECKING:
+ 14    from sqlglot import exp
+ 15    from sqlglot.expressions import Expression
+ 16
+ 17    T = t.TypeVar("T")
+ 18    E = t.TypeVar("E", bound=Expression)
+ 19
+ 20CAMEL_CASE_PATTERN = re.compile("(?<!^)(?=[A-Z])")
+ 21PYTHON_VERSION = sys.version_info[:2]
+ 22logger = logging.getLogger("sqlglot")
+ 23
+ 24
+ 25class AutoName(Enum):
+ 26    """This is used for creating enum classes where `auto()` is the string form of the corresponding value's name."""
+ 27
+ 28    def _generate_next_value_(name, _start, _count, _last_values):  # type: ignore
+ 29        return name
+ 30
+ 31
+ 32def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
+ 33    """Returns the value in `seq` at position `index`, or `None` if `index` is out of bounds."""
+ 34    try:
+ 35        return seq[index]
+ 36    except IndexError:
+ 37        return None
+ 38
+ 39
+ 40@t.overload
+ 41def ensure_list(value: t.Collection[T]) -> t.List[T]:
+ 42    ...
+ 43
+ 44
+ 45@t.overload
+ 46def ensure_list(value: T) -> t.List[T]:
+ 47    ...
+ 48
+ 49
+ 50def ensure_list(value):
+ 51    """
+ 52    Ensures that a value is a list, otherwise casts or wraps it into one.
+ 53
+ 54    Args:
+ 55        value: the value of interest.
+ 56
+ 57    Returns:
+ 58        The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.
+ 59    """
+ 60    if value is None:
+ 61        return []
+ 62    elif isinstance(value, (list, tuple)):
+ 63        return list(value)
+ 64
+ 65    return [value]
+ 66
+ 67
+ 68@t.overload
+ 69def ensure_collection(value: t.Collection[T]) -> t.Collection[T]:
+ 70    ...
+ 71
+ 72
+ 73@t.overload
+ 74def ensure_collection(value: T) -> t.Collection[T]:
+ 75    ...
+ 76
+ 77
+ 78def ensure_collection(value):
+ 79    """
+ 80    Ensures that a value is a collection (excluding `str` and `bytes`), otherwise wraps it into a list.
+ 81
+ 82    Args:
+ 83        value: the value of interest.
+ 84
+ 85    Returns:
+ 86        The value if it's a collection, or else the value wrapped in a list.
+ 87    """
+ 88    if value is None:
+ 89        return []
+ 90    return (
+ 91        value if isinstance(value, Collection) and not isinstance(value, (str, bytes)) else [value]
+ 92    )
+ 93
+ 94
+ 95def csv(*args, sep: str = ", ") -> str:
+ 96    """
+ 97    Formats any number of string arguments as CSV.
+ 98
+ 99    Args:
+100        args: the string arguments to format.
+101        sep: the argument separator.
+102
+103    Returns:
+104        The arguments formatted as a CSV string.
+105    """
+106    return sep.join(arg for arg in args if arg)
+107
+108
+109def subclasses(
+110    module_name: str,
+111    classes: t.Type | t.Tuple[t.Type, ...],
+112    exclude: t.Type | t.Tuple[t.Type, ...] = (),
+113) -> t.List[t.Type]:
+114    """
+115    Returns all subclasses for a collection of classes, possibly excluding some of them.
+116
+117    Args:
+118        module_name: the name of the module to search for subclasses in.
+119        classes: class(es) we want to find the subclasses of.
+120        exclude: class(es) we want to exclude from the returned list.
+121
+122    Returns:
+123        The target subclasses.
+124    """
+125    return [
+126        obj
+127        for _, obj in inspect.getmembers(
+128            sys.modules[module_name],
+129            lambda obj: inspect.isclass(obj) and issubclass(obj, classes) and obj not in exclude,
+130        )
+131    ]
+132
+133
+134def apply_index_offset(expressions: t.List[t.Optional[E]], offset: int) -> t.List[t.Optional[E]]:
+135    """
+136    Applies an offset to a given integer literal expression.
+137
+138    Args:
+139        expressions: the expression the offset will be applied to, wrapped in a list.
+140        offset: the offset that will be applied.
+141
+142    Returns:
+143        The original expression with the offset applied to it, wrapped in a list. If the provided
+144        `expressions` argument contains more than one expressions, it's returned unaffected.
+145    """
+146    if not offset or len(expressions) != 1:
+147        return expressions
+148
+149    expression = expressions[0]
+150
+151    if expression and expression.is_int:
+152        expression = expression.copy()
+153        logger.warning("Applying array index offset (%s)", offset)
+154        expression.args["this"] = str(int(expression.this) + offset)  # type: ignore
+155        return [expression]
+156
+157    return expressions
+158
+159
+160def camel_to_snake_case(name: str) -> str:
+161    """Converts `name` from camelCase to snake_case and returns the result."""
+162    return CAMEL_CASE_PATTERN.sub("_", name).upper()
+163
+164
+165def while_changing(
+166    expression: t.Optional[Expression], func: t.Callable[[t.Optional[Expression]], E]
+167) -> E:
+168    """
+169    Applies a transformation to a given expression until a fix point is reached.
+170
+171    Args:
+172        expression: the expression to be transformed.
+173        func: the transformation to be applied.
+174
+175    Returns:
+176        The transformed expression.
+177    """
+178    while True:
+179        start = hash(expression)
+180        expression = func(expression)
+181        if start == hash(expression):
+182            break
+183    return expression
+184
+185
+186def tsort(dag: t.Dict[T, t.List[T]]) -> t.List[T]:
+187    """
+188    Sorts a given directed acyclic graph in topological order.
+189
+190    Args:
+191        dag: the graph to be sorted.
+192
+193    Returns:
+194        A list that contains all of the graph's nodes in topological order.
+195    """
+196    result = []
+197
+198    def visit(node: T, visited: t.Set[T]) -> None:
+199        if node in result:
+200            return
+201        if node in visited:
+202            raise ValueError("Cycle error")
+203
+204        visited.add(node)
+205
+206        for dep in dag.get(node, []):
+207            visit(dep, visited)
+208
+209        visited.remove(node)
+210        result.append(node)
+211
+212    for node in dag:
+213        visit(node, set())
+214
+215    return result
+216
+217
+218def open_file(file_name: str) -> t.TextIO:
+219    """Open a file that may be compressed as gzip and return it in universal newline mode."""
+220    with open(file_name, "rb") as f:
+221        gzipped = f.read(2) == b"\x1f\x8b"
+222
+223    if gzipped:
+224        import gzip
+225
+226        return gzip.open(file_name, "rt", newline="")
+227
+228    return open(file_name, encoding="utf-8", newline="")
+229
+230
+231@contextmanager
+232def csv_reader(read_csv: exp.ReadCSV) -> t.Any:
+233    """
+234    Returns a csv reader given the expression `READ_CSV(name, ['delimiter', '|', ...])`.
+235
+236    Args:
+237        read_csv: a `ReadCSV` function call
+238
+239    Yields:
+240        A python csv reader.
+241    """
+242    args = read_csv.expressions
+243    file = open_file(read_csv.name)
+244
+245    delimiter = ","
+246    args = iter(arg.name for arg in args)
+247    for k, v in zip(args, args):
+248        if k == "delimiter":
+249            delimiter = v
+250
+251    try:
+252        import csv as csv_
+253
+254        yield csv_.reader(file, delimiter=delimiter)
+255    finally:
+256        file.close()
+257
+258
+259def find_new_name(taken: t.Collection[str], base: str) -> str:
+260    """
+261    Searches for a new name.
+262
+263    Args:
+264        taken: a collection of taken names.
+265        base: base name to alter.
+266
+267    Returns:
+268        The new, available name.
+269    """
+270    if base not in taken:
+271        return base
+272
+273    i = 2
+274    new = f"{base}_{i}"
+275    while new in taken:
+276        i += 1
+277        new = f"{base}_{i}"
+278
+279    return new
+280
+281
+282def object_to_dict(obj: t.Any, **kwargs) -> t.Dict:
+283    """Returns a dictionary created from an object's attributes."""
+284    return {**{k: copy(v) for k, v in vars(obj).copy().items()}, **kwargs}
+285
+286
+287def split_num_words(
+288    value: str, sep: str, min_num_words: int, fill_from_start: bool = True
+289) -> t.List[t.Optional[str]]:
+290    """
+291    Perform a split on a value and return N words as a result with `None` used for words that don't exist.
+292
+293    Args:
+294        value: the value to be split.
+295        sep: the value to use to split on.
+296        min_num_words: the minimum number of words that are going to be in the result.
+297        fill_from_start: indicates that if `None` values should be inserted at the start or end of the list.
+298
+299    Examples:
+300        >>> split_num_words("db.table", ".", 3)
+301        [None, 'db', 'table']
+302        >>> split_num_words("db.table", ".", 3, fill_from_start=False)
+303        ['db', 'table', None]
+304        >>> split_num_words("db.table", ".", 1)
+305        ['db', 'table']
+306
+307    Returns:
+308        The list of words returned by `split`, possibly augmented by a number of `None` values.
+309    """
+310    words = value.split(sep)
+311    if fill_from_start:
+312        return [None] * (min_num_words - len(words)) + words
+313    return words + [None] * (min_num_words - len(words))
+314
+315
+316def is_iterable(value: t.Any) -> bool:
+317    """
+318    Checks if the value is an iterable, excluding the types `str` and `bytes`.
+319
+320    Examples:
+321        >>> is_iterable([1,2])
+322        True
+323        >>> is_iterable("test")
+324        False
+325
+326    Args:
+327        value: the value to check if it is an iterable.
+328
+329    Returns:
+330        A `bool` value indicating if it is an iterable.
+331    """
+332    return hasattr(value, "__iter__") and not isinstance(value, (str, bytes))
+333
+334
+335def flatten(values: t.Iterable[t.Iterable[t.Any] | t.Any]) -> t.Iterator[t.Any]:
+336    """
+337    Flattens an iterable that can contain both iterable and non-iterable elements. Objects of
+338    type `str` and `bytes` are not regarded as iterables.
+339
+340    Examples:
+341        >>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))
+342        [1, 2, 3, 4, 5, 'bla']
+343        >>> list(flatten([1, 2, 3]))
+344        [1, 2, 3]
+345
+346    Args:
+347        values: the value to be flattened.
+348
+349    Yields:
+350        Non-iterable elements in `values`.
+351    """
+352    for value in values:
+353        if is_iterable(value):
+354            yield from flatten(value)
+355        else:
+356            yield value
+357
+358
+359def count_params(function: t.Callable) -> int:
+360    """
+361    Returns the number of formal parameters expected by a function, without counting "self"
+362    and "cls", in case of instance and class methods, respectively.
+363    """
+364    count = function.__code__.co_argcount
+365    return count - 1 if inspect.ismethod(function) else count
+366
+367
+368def dict_depth(d: t.Dict) -> int:
+369    """
+370    Get the nesting depth of a dictionary.
+371
+372    For example:
+373        >>> dict_depth(None)
+374        0
+375        >>> dict_depth({})
+376        1
+377        >>> dict_depth({"a": "b"})
+378        1
+379        >>> dict_depth({"a": {}})
+380        2
+381        >>> dict_depth({"a": {"b": {}}})
+382        3
+383
+384    Args:
+385        d (dict): dictionary
+386
+387    Returns:
+388        int: depth
+389    """
+390    try:
+391        return 1 + dict_depth(next(iter(d.values())))
+392    except AttributeError:
+393        # d doesn't have attribute "values"
+394        return 0
+395    except StopIteration:
+396        # d.values() returns an empty sequence
+397        return 1
+398
+399
+400def first(it: t.Iterable[T]) -> T:
+401    """Returns the first element from an iterable.
+402
+403    Useful for sets.
+404    """
+405    return next(i for i in it)
+
+ + +
+
+ +
+ + class + AutoName(enum.Enum): + + + +
+ +
26class AutoName(Enum):
+27    """This is used for creating enum classes where `auto()` is the string form of the corresponding value's name."""
+28
+29    def _generate_next_value_(name, _start, _count, _last_values):  # type: ignore
+30        return name
+
+ + +

This is used for creating enum classes where auto() is the string form of the corresponding value's name.

+
+ + +
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + def + seq_get(seq: Sequence[~T], index: int) -> Optional[~T]: + + + +
+ +
33def seq_get(seq: t.Sequence[T], index: int) -> t.Optional[T]:
+34    """Returns the value in `seq` at position `index`, or `None` if `index` is out of bounds."""
+35    try:
+36        return seq[index]
+37    except IndexError:
+38        return None
+
+ + +

Returns the value in seq at position index, or None if index is out of bounds.

+
+ + +
+
+ +
+ + def + ensure_list(value): + + + +
+ +
51def ensure_list(value):
+52    """
+53    Ensures that a value is a list, otherwise casts or wraps it into one.
+54
+55    Args:
+56        value: the value of interest.
+57
+58    Returns:
+59        The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.
+60    """
+61    if value is None:
+62        return []
+63    elif isinstance(value, (list, tuple)):
+64        return list(value)
+65
+66    return [value]
+
+ + +

Ensures that a value is a list, otherwise casts or wraps it into one.

+ +
Arguments:
+ +
    +
  • value: the value of interest.
  • +
+ +
Returns:
+ +
+

The value cast as a list if it's a list or a tuple, or else the value wrapped in a list.

+
+
+ + +
+
+ +
+ + def + ensure_collection(value): + + + +
+ +
79def ensure_collection(value):
+80    """
+81    Ensures that a value is a collection (excluding `str` and `bytes`), otherwise wraps it into a list.
+82
+83    Args:
+84        value: the value of interest.
+85
+86    Returns:
+87        The value if it's a collection, or else the value wrapped in a list.
+88    """
+89    if value is None:
+90        return []
+91    return (
+92        value if isinstance(value, Collection) and not isinstance(value, (str, bytes)) else [value]
+93    )
+
+ + +

Ensures that a value is a collection (excluding str and bytes), otherwise wraps it into a list.

+ +
Arguments:
+ +
    +
  • value: the value of interest.
  • +
+ +
Returns:
+ +
+

The value if it's a collection, or else the value wrapped in a list.

+
+
+ + +
+
+ +
+ + def + csv(*args, sep: str = ', ') -> str: + + + +
+ +
 96def csv(*args, sep: str = ", ") -> str:
+ 97    """
+ 98    Formats any number of string arguments as CSV.
+ 99
+100    Args:
+101        args: the string arguments to format.
+102        sep: the argument separator.
+103
+104    Returns:
+105        The arguments formatted as a CSV string.
+106    """
+107    return sep.join(arg for arg in args if arg)
+
+ + +

Formats any number of string arguments as CSV.

+ +
Arguments:
+ +
    +
  • args: the string arguments to format.
  • +
  • sep: the argument separator.
  • +
+ +
Returns:
+ +
+

The arguments formatted as a CSV string.

+
+
+ + +
+
+ +
+ + def + subclasses( module_name: str, classes: Union[Type, Tuple[Type, ...]], exclude: Union[Type, Tuple[Type, ...]] = ()) -> List[Type]: + + + +
+ +
110def subclasses(
+111    module_name: str,
+112    classes: t.Type | t.Tuple[t.Type, ...],
+113    exclude: t.Type | t.Tuple[t.Type, ...] = (),
+114) -> t.List[t.Type]:
+115    """
+116    Returns all subclasses for a collection of classes, possibly excluding some of them.
+117
+118    Args:
+119        module_name: the name of the module to search for subclasses in.
+120        classes: class(es) we want to find the subclasses of.
+121        exclude: class(es) we want to exclude from the returned list.
+122
+123    Returns:
+124        The target subclasses.
+125    """
+126    return [
+127        obj
+128        for _, obj in inspect.getmembers(
+129            sys.modules[module_name],
+130            lambda obj: inspect.isclass(obj) and issubclass(obj, classes) and obj not in exclude,
+131        )
+132    ]
+
+ + +

Returns all subclasses for a collection of classes, possibly excluding some of them.

+ +
Arguments:
+ +
    +
  • module_name: the name of the module to search for subclasses in.
  • +
  • classes: class(es) we want to find the subclasses of.
  • +
  • exclude: class(es) we want to exclude from the returned list.
  • +
+ +
Returns:
+ +
+

The target subclasses.

+
+
+ + +
+
+ +
+ + def + apply_index_offset(expressions: List[Optional[~E]], offset: int) -> List[Optional[~E]]: + + + +
+ +
135def apply_index_offset(expressions: t.List[t.Optional[E]], offset: int) -> t.List[t.Optional[E]]:
+136    """
+137    Applies an offset to a given integer literal expression.
+138
+139    Args:
+140        expressions: the expression the offset will be applied to, wrapped in a list.
+141        offset: the offset that will be applied.
+142
+143    Returns:
+144        The original expression with the offset applied to it, wrapped in a list. If the provided
+145        `expressions` argument contains more than one expressions, it's returned unaffected.
+146    """
+147    if not offset or len(expressions) != 1:
+148        return expressions
+149
+150    expression = expressions[0]
+151
+152    if expression and expression.is_int:
+153        expression = expression.copy()
+154        logger.warning("Applying array index offset (%s)", offset)
+155        expression.args["this"] = str(int(expression.this) + offset)  # type: ignore
+156        return [expression]
+157
+158    return expressions
+
+ + +

Applies an offset to a given integer literal expression.

+ +
Arguments:
+ +
    +
  • expressions: the expression the offset will be applied to, wrapped in a list.
  • +
  • offset: the offset that will be applied.
  • +
+ +
Returns:
+ +
+

The original expression with the offset applied to it, wrapped in a list. If the provided + expressions argument contains more than one expressions, it's returned unaffected.

+
+
+ + +
+
+ +
+ + def + camel_to_snake_case(name: str) -> str: + + + +
+ +
161def camel_to_snake_case(name: str) -> str:
+162    """Converts `name` from camelCase to snake_case and returns the result."""
+163    return CAMEL_CASE_PATTERN.sub("_", name).upper()
+
+ + +

Converts name from camelCase to snake_case and returns the result.

+
+ + +
+
+ +
+ + def + while_changing( expression: Optional[sqlglot.expressions.Expression], func: Callable[[Optional[sqlglot.expressions.Expression]], ~E]) -> ~E: + + + +
+ +
166def while_changing(
+167    expression: t.Optional[Expression], func: t.Callable[[t.Optional[Expression]], E]
+168) -> E:
+169    """
+170    Applies a transformation to a given expression until a fix point is reached.
+171
+172    Args:
+173        expression: the expression to be transformed.
+174        func: the transformation to be applied.
+175
+176    Returns:
+177        The transformed expression.
+178    """
+179    while True:
+180        start = hash(expression)
+181        expression = func(expression)
+182        if start == hash(expression):
+183            break
+184    return expression
+
+ + +

Applies a transformation to a given expression until a fix point is reached.

+ +
Arguments:
+ +
    +
  • expression: the expression to be transformed.
  • +
  • func: the transformation to be applied.
  • +
+ +
Returns:
+ +
+

The transformed expression.

+
+
+ + +
+
+ +
+ + def + tsort(dag: Dict[~T, List[~T]]) -> List[~T]: + + + +
+ +
187def tsort(dag: t.Dict[T, t.List[T]]) -> t.List[T]:
+188    """
+189    Sorts a given directed acyclic graph in topological order.
+190
+191    Args:
+192        dag: the graph to be sorted.
+193
+194    Returns:
+195        A list that contains all of the graph's nodes in topological order.
+196    """
+197    result = []
+198
+199    def visit(node: T, visited: t.Set[T]) -> None:
+200        if node in result:
+201            return
+202        if node in visited:
+203            raise ValueError("Cycle error")
+204
+205        visited.add(node)
+206
+207        for dep in dag.get(node, []):
+208            visit(dep, visited)
+209
+210        visited.remove(node)
+211        result.append(node)
+212
+213    for node in dag:
+214        visit(node, set())
+215
+216    return result
+
+ + +

Sorts a given directed acyclic graph in topological order.

+ +
Arguments:
+ +
    +
  • dag: the graph to be sorted.
  • +
+ +
Returns:
+ +
+

A list that contains all of the graph's nodes in topological order.

+
+
+ + +
+
+ +
+ + def + open_file(file_name: str) -> <class 'TextIO'>: + + + +
+ +
219def open_file(file_name: str) -> t.TextIO:
+220    """Open a file that may be compressed as gzip and return it in universal newline mode."""
+221    with open(file_name, "rb") as f:
+222        gzipped = f.read(2) == b"\x1f\x8b"
+223
+224    if gzipped:
+225        import gzip
+226
+227        return gzip.open(file_name, "rt", newline="")
+228
+229    return open(file_name, encoding="utf-8", newline="")
+
+ + +

Open a file that may be compressed as gzip and return it in universal newline mode.

+
+ + +
+
+ +
+
@contextmanager
+ + def + csv_reader(read_csv: sqlglot.expressions.ReadCSV) -> Any: + + + +
+ +
232@contextmanager
+233def csv_reader(read_csv: exp.ReadCSV) -> t.Any:
+234    """
+235    Returns a csv reader given the expression `READ_CSV(name, ['delimiter', '|', ...])`.
+236
+237    Args:
+238        read_csv: a `ReadCSV` function call
+239
+240    Yields:
+241        A python csv reader.
+242    """
+243    args = read_csv.expressions
+244    file = open_file(read_csv.name)
+245
+246    delimiter = ","
+247    args = iter(arg.name for arg in args)
+248    for k, v in zip(args, args):
+249        if k == "delimiter":
+250            delimiter = v
+251
+252    try:
+253        import csv as csv_
+254
+255        yield csv_.reader(file, delimiter=delimiter)
+256    finally:
+257        file.close()
+
+ + +

Returns a csv reader given the expression READ_CSV(name, ['delimiter', '|', ...]).

+ +
Arguments:
+ +
    +
  • read_csv: a ReadCSV function call
  • +
+ +
Yields:
+ +
+

A python csv reader.

+
+
+ + +
+
+ +
+ + def + find_new_name(taken: Collection[str], base: str) -> str: + + + +
+ +
260def find_new_name(taken: t.Collection[str], base: str) -> str:
+261    """
+262    Searches for a new name.
+263
+264    Args:
+265        taken: a collection of taken names.
+266        base: base name to alter.
+267
+268    Returns:
+269        The new, available name.
+270    """
+271    if base not in taken:
+272        return base
+273
+274    i = 2
+275    new = f"{base}_{i}"
+276    while new in taken:
+277        i += 1
+278        new = f"{base}_{i}"
+279
+280    return new
+
+ + +

Searches for a new name.

+ +
Arguments:
+ +
    +
  • taken: a collection of taken names.
  • +
  • base: base name to alter.
  • +
+ +
Returns:
+ +
+

The new, available name.

+
+
+ + +
+
+ +
+ + def + object_to_dict(obj: Any, **kwargs) -> Dict: + + + +
+ +
283def object_to_dict(obj: t.Any, **kwargs) -> t.Dict:
+284    """Returns a dictionary created from an object's attributes."""
+285    return {**{k: copy(v) for k, v in vars(obj).copy().items()}, **kwargs}
+
+ + +

Returns a dictionary created from an object's attributes.

+
+ + +
+
+ +
+ + def + split_num_words( value: str, sep: str, min_num_words: int, fill_from_start: bool = True) -> List[Optional[str]]: + + + +
+ +
288def split_num_words(
+289    value: str, sep: str, min_num_words: int, fill_from_start: bool = True
+290) -> t.List[t.Optional[str]]:
+291    """
+292    Perform a split on a value and return N words as a result with `None` used for words that don't exist.
+293
+294    Args:
+295        value: the value to be split.
+296        sep: the value to use to split on.
+297        min_num_words: the minimum number of words that are going to be in the result.
+298        fill_from_start: indicates that if `None` values should be inserted at the start or end of the list.
+299
+300    Examples:
+301        >>> split_num_words("db.table", ".", 3)
+302        [None, 'db', 'table']
+303        >>> split_num_words("db.table", ".", 3, fill_from_start=False)
+304        ['db', 'table', None]
+305        >>> split_num_words("db.table", ".", 1)
+306        ['db', 'table']
+307
+308    Returns:
+309        The list of words returned by `split`, possibly augmented by a number of `None` values.
+310    """
+311    words = value.split(sep)
+312    if fill_from_start:
+313        return [None] * (min_num_words - len(words)) + words
+314    return words + [None] * (min_num_words - len(words))
+
+ + +

Perform a split on a value and return N words as a result with None used for words that don't exist.

+ +
Arguments:
+ +
    +
  • value: the value to be split.
  • +
  • sep: the value to use to split on.
  • +
  • min_num_words: the minimum number of words that are going to be in the result.
  • +
  • fill_from_start: indicates that if None values should be inserted at the start or end of the list.
  • +
+ +
Examples:
+ +
+
+
>>> split_num_words("db.table", ".", 3)
+[None, 'db', 'table']
+>>> split_num_words("db.table", ".", 3, fill_from_start=False)
+['db', 'table', None]
+>>> split_num_words("db.table", ".", 1)
+['db', 'table']
+
+
+
+ +
Returns:
+ +
+

The list of words returned by split, possibly augmented by a number of None values.

+
+
+ + +
+
+ +
+ + def + is_iterable(value: Any) -> bool: + + + +
+ +
317def is_iterable(value: t.Any) -> bool:
+318    """
+319    Checks if the value is an iterable, excluding the types `str` and `bytes`.
+320
+321    Examples:
+322        >>> is_iterable([1,2])
+323        True
+324        >>> is_iterable("test")
+325        False
+326
+327    Args:
+328        value: the value to check if it is an iterable.
+329
+330    Returns:
+331        A `bool` value indicating if it is an iterable.
+332    """
+333    return hasattr(value, "__iter__") and not isinstance(value, (str, bytes))
+
+ + +

Checks if the value is an iterable, excluding the types str and bytes.

+ +
Examples:
+ +
+
+
>>> is_iterable([1,2])
+True
+>>> is_iterable("test")
+False
+
+
+
+ +
Arguments:
+ +
    +
  • value: the value to check if it is an iterable.
  • +
+ +
Returns:
+ +
+

A bool value indicating if it is an iterable.

+
+
+ + +
+
+ +
+ + def + flatten(values: Iterable[Union[Iterable[Any], Any]]) -> Iterator[Any]: + + + +
+ +
336def flatten(values: t.Iterable[t.Iterable[t.Any] | t.Any]) -> t.Iterator[t.Any]:
+337    """
+338    Flattens an iterable that can contain both iterable and non-iterable elements. Objects of
+339    type `str` and `bytes` are not regarded as iterables.
+340
+341    Examples:
+342        >>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))
+343        [1, 2, 3, 4, 5, 'bla']
+344        >>> list(flatten([1, 2, 3]))
+345        [1, 2, 3]
+346
+347    Args:
+348        values: the value to be flattened.
+349
+350    Yields:
+351        Non-iterable elements in `values`.
+352    """
+353    for value in values:
+354        if is_iterable(value):
+355            yield from flatten(value)
+356        else:
+357            yield value
+
+ + +

Flattens an iterable that can contain both iterable and non-iterable elements. Objects of +type str and bytes are not regarded as iterables.

+ +
Examples:
+ +
+
+
>>> list(flatten([[1, 2], 3, {4}, (5, "bla")]))
+[1, 2, 3, 4, 5, 'bla']
+>>> list(flatten([1, 2, 3]))
+[1, 2, 3]
+
+
+
+ +
Arguments:
+ +
    +
  • values: the value to be flattened.
  • +
+ +
Yields:
+ +
+

Non-iterable elements in values.

+
+
+ + +
+
+ +
+ + def + count_params(function: Callable) -> int: + + + +
+ +
360def count_params(function: t.Callable) -> int:
+361    """
+362    Returns the number of formal parameters expected by a function, without counting "self"
+363    and "cls", in case of instance and class methods, respectively.
+364    """
+365    count = function.__code__.co_argcount
+366    return count - 1 if inspect.ismethod(function) else count
+
+ + +

Returns the number of formal parameters expected by a function, without counting "self" +and "cls", in case of instance and class methods, respectively.

+
+ + +
+
+ +
+ + def + dict_depth(d: Dict) -> int: + + + +
+ +
369def dict_depth(d: t.Dict) -> int:
+370    """
+371    Get the nesting depth of a dictionary.
+372
+373    For example:
+374        >>> dict_depth(None)
+375        0
+376        >>> dict_depth({})
+377        1
+378        >>> dict_depth({"a": "b"})
+379        1
+380        >>> dict_depth({"a": {}})
+381        2
+382        >>> dict_depth({"a": {"b": {}}})
+383        3
+384
+385    Args:
+386        d (dict): dictionary
+387
+388    Returns:
+389        int: depth
+390    """
+391    try:
+392        return 1 + dict_depth(next(iter(d.values())))
+393    except AttributeError:
+394        # d doesn't have attribute "values"
+395        return 0
+396    except StopIteration:
+397        # d.values() returns an empty sequence
+398        return 1
+
+ + +

Get the nesting depth of a dictionary.

+ +
For example:
+ +
+
+
>>> dict_depth(None)
+0
+>>> dict_depth({})
+1
+>>> dict_depth({"a": "b"})
+1
+>>> dict_depth({"a": {}})
+2
+>>> dict_depth({"a": {"b": {}}})
+3
+
+
+
+ +
Arguments:
+ +
    +
  • d (dict): dictionary
  • +
+ +
Returns:
+ +
+

int: depth

+
+
+ + +
+
+ +
+ + def + first(it: Iterable[~T]) -> ~T: + + + +
+ +
401def first(it: t.Iterable[T]) -> T:
+402    """Returns the first element from an iterable.
+403
+404    Useful for sets.
+405    """
+406    return next(i for i in it)
+
+ + +

Returns the first element from an iterable.

+ +

Useful for sets.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/lineage.html b/docs/sqlglot/lineage.html new file mode 100644 index 0000000..37abb1d --- /dev/null +++ b/docs/sqlglot/lineage.html @@ -0,0 +1,931 @@ + + + + + + + sqlglot.lineage API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.lineage

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import json
+  4import typing as t
+  5from dataclasses import dataclass, field
+  6
+  7from sqlglot import Schema, exp, maybe_parse
+  8from sqlglot.optimizer import Scope, build_scope, optimize
+  9from sqlglot.optimizer.expand_laterals import expand_laterals
+ 10from sqlglot.optimizer.qualify_columns import qualify_columns
+ 11from sqlglot.optimizer.qualify_tables import qualify_tables
+ 12
+ 13if t.TYPE_CHECKING:
+ 14    from sqlglot.dialects.dialect import DialectType
+ 15
+ 16
+ 17@dataclass(frozen=True)
+ 18class Node:
+ 19    name: str
+ 20    expression: exp.Expression
+ 21    source: exp.Expression
+ 22    downstream: t.List[Node] = field(default_factory=list)
+ 23
+ 24    def walk(self) -> t.Iterator[Node]:
+ 25        yield self
+ 26
+ 27        for d in self.downstream:
+ 28            if isinstance(d, Node):
+ 29                yield from d.walk()
+ 30            else:
+ 31                yield d
+ 32
+ 33    def to_html(self, **opts) -> LineageHTML:
+ 34        return LineageHTML(self, **opts)
+ 35
+ 36
+ 37def lineage(
+ 38    column: str | exp.Column,
+ 39    sql: str | exp.Expression,
+ 40    schema: t.Optional[t.Dict | Schema] = None,
+ 41    sources: t.Optional[t.Dict[str, str | exp.Subqueryable]] = None,
+ 42    rules: t.Sequence[t.Callable] = (qualify_tables, qualify_columns, expand_laterals),
+ 43    dialect: DialectType = None,
+ 44) -> Node:
+ 45    """Build the lineage graph for a column of a SQL query.
+ 46
+ 47    Args:
+ 48        column: The column to build the lineage for.
+ 49        sql: The SQL string or expression.
+ 50        schema: The schema of tables.
+ 51        sources: A mapping of queries which will be used to continue building lineage.
+ 52        rules: Optimizer rules to apply, by default only qualifying tables and columns.
+ 53        dialect: The dialect of input SQL.
+ 54
+ 55    Returns:
+ 56        A lineage node.
+ 57    """
+ 58
+ 59    expression = maybe_parse(sql, dialect=dialect)
+ 60
+ 61    if sources:
+ 62        expression = exp.expand(
+ 63            expression,
+ 64            {
+ 65                k: t.cast(exp.Subqueryable, maybe_parse(v, dialect=dialect))
+ 66                for k, v in sources.items()
+ 67            },
+ 68        )
+ 69
+ 70    optimized = optimize(expression, schema=schema, rules=rules)
+ 71    scope = build_scope(optimized)
+ 72    tables: t.Dict[str, Node] = {}
+ 73
+ 74    def to_node(
+ 75        column_name: str,
+ 76        scope: Scope,
+ 77        scope_name: t.Optional[str] = None,
+ 78        upstream: t.Optional[Node] = None,
+ 79    ) -> Node:
+ 80        if isinstance(scope.expression, exp.Union):
+ 81            for scope in scope.union_scopes:
+ 82                node = to_node(
+ 83                    column_name,
+ 84                    scope=scope,
+ 85                    scope_name=scope_name,
+ 86                    upstream=upstream,
+ 87                )
+ 88            return node
+ 89
+ 90        select = next(select for select in scope.selects if select.alias_or_name == column_name)
+ 91        source = optimize(scope.expression.select(select, append=False), schema=schema, rules=rules)
+ 92        select = source.selects[0]
+ 93
+ 94        node = Node(
+ 95            name=f"{scope_name}.{column_name}" if scope_name else column_name,
+ 96            source=source,
+ 97            expression=select,
+ 98        )
+ 99
+100        if upstream:
+101            upstream.downstream.append(node)
+102
+103        for c in set(select.find_all(exp.Column)):
+104            table = c.table
+105            source = scope.sources[table]
+106
+107            if isinstance(source, Scope):
+108                to_node(
+109                    c.name,
+110                    scope=source,
+111                    scope_name=table,
+112                    upstream=node,
+113                )
+114            else:
+115                if table not in tables:
+116                    tables[table] = Node(name=table, source=source, expression=source)
+117                node.downstream.append(tables[table])
+118
+119        return node
+120
+121    return to_node(column if isinstance(column, str) else column.name, scope)
+122
+123
+124class LineageHTML:
+125    """Node to HTML generator using vis.js.
+126
+127    https://visjs.github.io/vis-network/docs/network/
+128    """
+129
+130    def __init__(
+131        self,
+132        node: Node,
+133        dialect: DialectType = None,
+134        imports: bool = True,
+135        **opts: t.Any,
+136    ):
+137        self.node = node
+138        self.imports = imports
+139
+140        self.options = {
+141            "height": "500px",
+142            "width": "100%",
+143            "layout": {
+144                "hierarchical": {
+145                    "enabled": True,
+146                    "nodeSpacing": 200,
+147                    "sortMethod": "directed",
+148                },
+149            },
+150            "interaction": {
+151                "dragNodes": False,
+152                "selectable": False,
+153            },
+154            "physics": {
+155                "enabled": False,
+156            },
+157            "edges": {
+158                "arrows": "to",
+159            },
+160            "nodes": {
+161                "font": "20px monaco",
+162                "shape": "box",
+163                "widthConstraint": {
+164                    "maximum": 300,
+165                },
+166            },
+167            **opts,
+168        }
+169
+170        self.nodes = {}
+171        self.edges = []
+172
+173        for node in node.walk():
+174            if isinstance(node.expression, exp.Table):
+175                label = f"FROM {node.expression.this}"
+176                title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
+177                group = 1
+178            else:
+179                label = node.expression.sql(pretty=True, dialect=dialect)
+180                source = node.source.transform(
+181                    lambda n: exp.Tag(this=n, prefix="<b>", postfix="</b>")
+182                    if n is node.expression
+183                    else n,
+184                    copy=False,
+185                ).sql(pretty=True, dialect=dialect)
+186                title = f"<pre>{source}</pre>"
+187                group = 0
+188
+189            node_id = id(node)
+190
+191            self.nodes[node_id] = {
+192                "id": node_id,
+193                "label": label,
+194                "title": title,
+195                "group": group,
+196            }
+197
+198            for d in node.downstream:
+199                self.edges.append({"from": node_id, "to": id(d)})
+200
+201    def __str__(self):
+202        nodes = json.dumps(list(self.nodes.values()))
+203        edges = json.dumps(self.edges)
+204        options = json.dumps(self.options)
+205        imports = (
+206            """<script type="text/javascript" src="https://unpkg.com/vis-data@latest/peer/umd/vis-data.min.js"></script>
+207  <script type="text/javascript" src="https://unpkg.com/vis-network@latest/peer/umd/vis-network.min.js"></script>
+208  <link rel="stylesheet" type="text/css" href="https://unpkg.com/vis-network/styles/vis-network.min.css" />"""
+209            if self.imports
+210            else ""
+211        )
+212
+213        return f"""<div>
+214  <div id="sqlglot-lineage"></div>
+215  {imports}
+216  <script type="text/javascript">
+217    var nodes = new vis.DataSet({nodes})
+218    nodes.forEach(row => row["title"] = new DOMParser().parseFromString(row["title"], "text/html").body.childNodes[0])
+219
+220    new vis.Network(
+221        document.getElementById("sqlglot-lineage"),
+222        {{
+223            nodes: nodes,
+224            edges: new vis.DataSet({edges})
+225        }},
+226        {options},
+227    )
+228  </script>
+229</div>"""
+230
+231    def _repr_html_(self) -> str:
+232        return self.__str__()
+
+ + +
+
+ +
+
@dataclass(frozen=True)
+ + class + Node: + + + +
+ +
18@dataclass(frozen=True)
+19class Node:
+20    name: str
+21    expression: exp.Expression
+22    source: exp.Expression
+23    downstream: t.List[Node] = field(default_factory=list)
+24
+25    def walk(self) -> t.Iterator[Node]:
+26        yield self
+27
+28        for d in self.downstream:
+29            if isinstance(d, Node):
+30                yield from d.walk()
+31            else:
+32                yield d
+33
+34    def to_html(self, **opts) -> LineageHTML:
+35        return LineageHTML(self, **opts)
+
+ + + + +
+
+ + Node( name: str, expression: sqlglot.expressions.Expression, source: sqlglot.expressions.Expression, downstream: List[sqlglot.lineage.Node] = <factory>) + + +
+ + + + +
+
+ +
+ + def + walk(self) -> Iterator[sqlglot.lineage.Node]: + + + +
+ +
25    def walk(self) -> t.Iterator[Node]:
+26        yield self
+27
+28        for d in self.downstream:
+29            if isinstance(d, Node):
+30                yield from d.walk()
+31            else:
+32                yield d
+
+ + + + +
+
+ +
+ + def + to_html(self, **opts) -> sqlglot.lineage.LineageHTML: + + + +
+ +
34    def to_html(self, **opts) -> LineageHTML:
+35        return LineageHTML(self, **opts)
+
+ + + + +
+
+
+ +
+ + def + lineage( column: str | sqlglot.expressions.Column, sql: str | sqlglot.expressions.Expression, schema: Union[Dict, sqlglot.schema.Schema, NoneType] = None, sources: Optional[Dict[str, str | sqlglot.expressions.Subqueryable]] = None, rules: Sequence[Callable] = (<function qualify_tables at 0x7ff75a9d9240>, <function qualify_columns at 0x7ff75a9d8820>, <function expand_laterals at 0x7ff75a9b2b90>), dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) -> sqlglot.lineage.Node: + + + +
+ +
 38def lineage(
+ 39    column: str | exp.Column,
+ 40    sql: str | exp.Expression,
+ 41    schema: t.Optional[t.Dict | Schema] = None,
+ 42    sources: t.Optional[t.Dict[str, str | exp.Subqueryable]] = None,
+ 43    rules: t.Sequence[t.Callable] = (qualify_tables, qualify_columns, expand_laterals),
+ 44    dialect: DialectType = None,
+ 45) -> Node:
+ 46    """Build the lineage graph for a column of a SQL query.
+ 47
+ 48    Args:
+ 49        column: The column to build the lineage for.
+ 50        sql: The SQL string or expression.
+ 51        schema: The schema of tables.
+ 52        sources: A mapping of queries which will be used to continue building lineage.
+ 53        rules: Optimizer rules to apply, by default only qualifying tables and columns.
+ 54        dialect: The dialect of input SQL.
+ 55
+ 56    Returns:
+ 57        A lineage node.
+ 58    """
+ 59
+ 60    expression = maybe_parse(sql, dialect=dialect)
+ 61
+ 62    if sources:
+ 63        expression = exp.expand(
+ 64            expression,
+ 65            {
+ 66                k: t.cast(exp.Subqueryable, maybe_parse(v, dialect=dialect))
+ 67                for k, v in sources.items()
+ 68            },
+ 69        )
+ 70
+ 71    optimized = optimize(expression, schema=schema, rules=rules)
+ 72    scope = build_scope(optimized)
+ 73    tables: t.Dict[str, Node] = {}
+ 74
+ 75    def to_node(
+ 76        column_name: str,
+ 77        scope: Scope,
+ 78        scope_name: t.Optional[str] = None,
+ 79        upstream: t.Optional[Node] = None,
+ 80    ) -> Node:
+ 81        if isinstance(scope.expression, exp.Union):
+ 82            for scope in scope.union_scopes:
+ 83                node = to_node(
+ 84                    column_name,
+ 85                    scope=scope,
+ 86                    scope_name=scope_name,
+ 87                    upstream=upstream,
+ 88                )
+ 89            return node
+ 90
+ 91        select = next(select for select in scope.selects if select.alias_or_name == column_name)
+ 92        source = optimize(scope.expression.select(select, append=False), schema=schema, rules=rules)
+ 93        select = source.selects[0]
+ 94
+ 95        node = Node(
+ 96            name=f"{scope_name}.{column_name}" if scope_name else column_name,
+ 97            source=source,
+ 98            expression=select,
+ 99        )
+100
+101        if upstream:
+102            upstream.downstream.append(node)
+103
+104        for c in set(select.find_all(exp.Column)):
+105            table = c.table
+106            source = scope.sources[table]
+107
+108            if isinstance(source, Scope):
+109                to_node(
+110                    c.name,
+111                    scope=source,
+112                    scope_name=table,
+113                    upstream=node,
+114                )
+115            else:
+116                if table not in tables:
+117                    tables[table] = Node(name=table, source=source, expression=source)
+118                node.downstream.append(tables[table])
+119
+120        return node
+121
+122    return to_node(column if isinstance(column, str) else column.name, scope)
+
+ + +

Build the lineage graph for a column of a SQL query.

+ +
Arguments:
+ +
    +
  • column: The column to build the lineage for.
  • +
  • sql: The SQL string or expression.
  • +
  • schema: The schema of tables.
  • +
  • sources: A mapping of queries which will be used to continue building lineage.
  • +
  • rules: Optimizer rules to apply, by default only qualifying tables and columns.
  • +
  • dialect: The dialect of input SQL.
  • +
+ +
Returns:
+ +
+

A lineage node.

+
+
+ + +
+
+ +
+ + class + LineageHTML: + + + +
+ +
125class LineageHTML:
+126    """Node to HTML generator using vis.js.
+127
+128    https://visjs.github.io/vis-network/docs/network/
+129    """
+130
+131    def __init__(
+132        self,
+133        node: Node,
+134        dialect: DialectType = None,
+135        imports: bool = True,
+136        **opts: t.Any,
+137    ):
+138        self.node = node
+139        self.imports = imports
+140
+141        self.options = {
+142            "height": "500px",
+143            "width": "100%",
+144            "layout": {
+145                "hierarchical": {
+146                    "enabled": True,
+147                    "nodeSpacing": 200,
+148                    "sortMethod": "directed",
+149                },
+150            },
+151            "interaction": {
+152                "dragNodes": False,
+153                "selectable": False,
+154            },
+155            "physics": {
+156                "enabled": False,
+157            },
+158            "edges": {
+159                "arrows": "to",
+160            },
+161            "nodes": {
+162                "font": "20px monaco",
+163                "shape": "box",
+164                "widthConstraint": {
+165                    "maximum": 300,
+166                },
+167            },
+168            **opts,
+169        }
+170
+171        self.nodes = {}
+172        self.edges = []
+173
+174        for node in node.walk():
+175            if isinstance(node.expression, exp.Table):
+176                label = f"FROM {node.expression.this}"
+177                title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
+178                group = 1
+179            else:
+180                label = node.expression.sql(pretty=True, dialect=dialect)
+181                source = node.source.transform(
+182                    lambda n: exp.Tag(this=n, prefix="<b>", postfix="</b>")
+183                    if n is node.expression
+184                    else n,
+185                    copy=False,
+186                ).sql(pretty=True, dialect=dialect)
+187                title = f"<pre>{source}</pre>"
+188                group = 0
+189
+190            node_id = id(node)
+191
+192            self.nodes[node_id] = {
+193                "id": node_id,
+194                "label": label,
+195                "title": title,
+196                "group": group,
+197            }
+198
+199            for d in node.downstream:
+200                self.edges.append({"from": node_id, "to": id(d)})
+201
+202    def __str__(self):
+203        nodes = json.dumps(list(self.nodes.values()))
+204        edges = json.dumps(self.edges)
+205        options = json.dumps(self.options)
+206        imports = (
+207            """<script type="text/javascript" src="https://unpkg.com/vis-data@latest/peer/umd/vis-data.min.js"></script>
+208  <script type="text/javascript" src="https://unpkg.com/vis-network@latest/peer/umd/vis-network.min.js"></script>
+209  <link rel="stylesheet" type="text/css" href="https://unpkg.com/vis-network/styles/vis-network.min.css" />"""
+210            if self.imports
+211            else ""
+212        )
+213
+214        return f"""<div>
+215  <div id="sqlglot-lineage"></div>
+216  {imports}
+217  <script type="text/javascript">
+218    var nodes = new vis.DataSet({nodes})
+219    nodes.forEach(row => row["title"] = new DOMParser().parseFromString(row["title"], "text/html").body.childNodes[0])
+220
+221    new vis.Network(
+222        document.getElementById("sqlglot-lineage"),
+223        {{
+224            nodes: nodes,
+225            edges: new vis.DataSet({edges})
+226        }},
+227        {options},
+228    )
+229  </script>
+230</div>"""
+231
+232    def _repr_html_(self) -> str:
+233        return self.__str__()
+
+ + +

Node to HTML generator using vis.js.

+ +

https://visjs.github.io/vis-network/docs/network/

+
+ + +
+ +
+ + LineageHTML( node: sqlglot.lineage.Node, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None, imports: bool = True, **opts: Any) + + + +
+ +
131    def __init__(
+132        self,
+133        node: Node,
+134        dialect: DialectType = None,
+135        imports: bool = True,
+136        **opts: t.Any,
+137    ):
+138        self.node = node
+139        self.imports = imports
+140
+141        self.options = {
+142            "height": "500px",
+143            "width": "100%",
+144            "layout": {
+145                "hierarchical": {
+146                    "enabled": True,
+147                    "nodeSpacing": 200,
+148                    "sortMethod": "directed",
+149                },
+150            },
+151            "interaction": {
+152                "dragNodes": False,
+153                "selectable": False,
+154            },
+155            "physics": {
+156                "enabled": False,
+157            },
+158            "edges": {
+159                "arrows": "to",
+160            },
+161            "nodes": {
+162                "font": "20px monaco",
+163                "shape": "box",
+164                "widthConstraint": {
+165                    "maximum": 300,
+166                },
+167            },
+168            **opts,
+169        }
+170
+171        self.nodes = {}
+172        self.edges = []
+173
+174        for node in node.walk():
+175            if isinstance(node.expression, exp.Table):
+176                label = f"FROM {node.expression.this}"
+177                title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
+178                group = 1
+179            else:
+180                label = node.expression.sql(pretty=True, dialect=dialect)
+181                source = node.source.transform(
+182                    lambda n: exp.Tag(this=n, prefix="<b>", postfix="</b>")
+183                    if n is node.expression
+184                    else n,
+185                    copy=False,
+186                ).sql(pretty=True, dialect=dialect)
+187                title = f"<pre>{source}</pre>"
+188                group = 0
+189
+190            node_id = id(node)
+191
+192            self.nodes[node_id] = {
+193                "id": node_id,
+194                "label": label,
+195                "title": title,
+196                "group": group,
+197            }
+198
+199            for d in node.downstream:
+200                self.edges.append({"from": node_id, "to": id(d)})
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer.html b/docs/sqlglot/optimizer.html new file mode 100644 index 0000000..1321d62 --- /dev/null +++ b/docs/sqlglot/optimizer.html @@ -0,0 +1,264 @@ + + + + + + + sqlglot.optimizer API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer

+ + + + + + +
1from sqlglot.optimizer.optimizer import RULES, optimize
+2from sqlglot.optimizer.scope import Scope, build_scope, traverse_scope
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/annotate_types.html b/docs/sqlglot/optimizer/annotate_types.html new file mode 100644 index 0000000..2d84d3e --- /dev/null +++ b/docs/sqlglot/optimizer/annotate_types.html @@ -0,0 +1,1179 @@ + + + + + + + sqlglot.optimizer.annotate_types API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.annotate_types

+ + + + + + +
  1from sqlglot import exp
+  2from sqlglot.helper import ensure_collection, ensure_list, subclasses
+  3from sqlglot.optimizer.scope import Scope, traverse_scope
+  4from sqlglot.schema import ensure_schema
+  5
+  6
+  7def annotate_types(expression, schema=None, annotators=None, coerces_to=None):
+  8    """
+  9    Recursively infer & annotate types in an expression syntax tree against a schema.
+ 10    Assumes that we've already executed the optimizer's qualify_columns step.
+ 11
+ 12    Example:
+ 13        >>> import sqlglot
+ 14        >>> schema = {"y": {"cola": "SMALLINT"}}
+ 15        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
+ 16        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
+ 17        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
+ 18        <Type.DOUBLE: 'DOUBLE'>
+ 19
+ 20    Args:
+ 21        expression (sqlglot.Expression): Expression to annotate.
+ 22        schema (dict|sqlglot.optimizer.Schema): Database schema.
+ 23        annotators (dict): Maps expression type to corresponding annotation function.
+ 24        coerces_to (dict): Maps expression type to set of types that it can be coerced into.
+ 25    Returns:
+ 26        sqlglot.Expression: expression annotated with types
+ 27    """
+ 28
+ 29    schema = ensure_schema(schema)
+ 30
+ 31    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
+ 32
+ 33
+ 34class TypeAnnotator:
+ 35    ANNOTATORS = {
+ 36        **{
+ 37            expr_type: lambda self, expr: self._annotate_unary(expr)
+ 38            for expr_type in subclasses(exp.__name__, exp.Unary)
+ 39        },
+ 40        **{
+ 41            expr_type: lambda self, expr: self._annotate_binary(expr)
+ 42            for expr_type in subclasses(exp.__name__, exp.Binary)
+ 43        },
+ 44        exp.Cast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
+ 45        exp.TryCast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
+ 46        exp.DataType: lambda self, expr: self._annotate_with_type(expr, expr.copy()),
+ 47        exp.Alias: lambda self, expr: self._annotate_unary(expr),
+ 48        exp.Between: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 49        exp.In: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 50        exp.Literal: lambda self, expr: self._annotate_literal(expr),
+ 51        exp.Boolean: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 52        exp.Null: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.NULL),
+ 53        exp.Anonymous: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.UNKNOWN),
+ 54        exp.ApproxDistinct: lambda self, expr: self._annotate_with_type(
+ 55            expr, exp.DataType.Type.BIGINT
+ 56        ),
+ 57        exp.Avg: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+ 58        exp.Min: lambda self, expr: self._annotate_by_args(expr, "this"),
+ 59        exp.Max: lambda self, expr: self._annotate_by_args(expr, "this"),
+ 60        exp.Sum: lambda self, expr: self._annotate_by_args(expr, "this", promote=True),
+ 61        exp.Ceil: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 62        exp.Count: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
+ 63        exp.CurrentDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 64        exp.CurrentDatetime: lambda self, expr: self._annotate_with_type(
+ 65            expr, exp.DataType.Type.DATETIME
+ 66        ),
+ 67        exp.CurrentTime: lambda self, expr: self._annotate_with_type(
+ 68            expr, exp.DataType.Type.TIMESTAMP
+ 69        ),
+ 70        exp.CurrentTimestamp: lambda self, expr: self._annotate_with_type(
+ 71            expr, exp.DataType.Type.TIMESTAMP
+ 72        ),
+ 73        exp.DateAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 74        exp.DateSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 75        exp.DateDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 76        exp.DatetimeAdd: lambda self, expr: self._annotate_with_type(
+ 77            expr, exp.DataType.Type.DATETIME
+ 78        ),
+ 79        exp.DatetimeSub: lambda self, expr: self._annotate_with_type(
+ 80            expr, exp.DataType.Type.DATETIME
+ 81        ),
+ 82        exp.DatetimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 83        exp.Extract: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 84        exp.TimestampAdd: lambda self, expr: self._annotate_with_type(
+ 85            expr, exp.DataType.Type.TIMESTAMP
+ 86        ),
+ 87        exp.TimestampSub: lambda self, expr: self._annotate_with_type(
+ 88            expr, exp.DataType.Type.TIMESTAMP
+ 89        ),
+ 90        exp.TimestampDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 91        exp.TimeAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
+ 92        exp.TimeSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
+ 93        exp.TimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 94        exp.DateStrToDate: lambda self, expr: self._annotate_with_type(
+ 95            expr, exp.DataType.Type.DATE
+ 96        ),
+ 97        exp.DateToDateStr: lambda self, expr: self._annotate_with_type(
+ 98            expr, exp.DataType.Type.VARCHAR
+ 99        ),
+100        exp.DateToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+101        exp.Day: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+102        exp.DiToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+103        exp.Exp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+104        exp.Floor: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+105        exp.Case: lambda self, expr: self._annotate_by_args(expr, "default", "ifs"),
+106        exp.If: lambda self, expr: self._annotate_by_args(expr, "true", "false"),
+107        exp.Coalesce: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
+108        exp.IfNull: lambda self, expr: self._annotate_by_args(expr, "this", "expression"),
+109        exp.ConcatWs: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+110        exp.GroupConcat: lambda self, expr: self._annotate_with_type(
+111            expr, exp.DataType.Type.VARCHAR
+112        ),
+113        exp.ArrayConcat: lambda self, expr: self._annotate_with_type(
+114            expr, exp.DataType.Type.VARCHAR
+115        ),
+116        exp.Initcap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+117        exp.Length: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
+118        exp.Levenshtein: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+119        exp.Ln: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+120        exp.Log: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+121        exp.Log2: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+122        exp.Log10: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+123        exp.Lower: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+124        exp.Month: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+125        exp.Pow: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+126        exp.Quantile: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+127        exp.ApproxQuantile: lambda self, expr: self._annotate_with_type(
+128            expr, exp.DataType.Type.DOUBLE
+129        ),
+130        exp.RegexpLike: lambda self, expr: self._annotate_with_type(
+131            expr, exp.DataType.Type.BOOLEAN
+132        ),
+133        exp.Round: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+134        exp.SafeDivide: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+135        exp.Substring: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+136        exp.StrPosition: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+137        exp.StrToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+138        exp.StrToTime: lambda self, expr: self._annotate_with_type(
+139            expr, exp.DataType.Type.TIMESTAMP
+140        ),
+141        exp.Sqrt: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+142        exp.Stddev: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+143        exp.StddevPop: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+144        exp.StddevSamp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+145        exp.TimeToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+146        exp.TimeToTimeStr: lambda self, expr: self._annotate_with_type(
+147            expr, exp.DataType.Type.VARCHAR
+148        ),
+149        exp.TimeStrToDate: lambda self, expr: self._annotate_with_type(
+150            expr, exp.DataType.Type.DATE
+151        ),
+152        exp.TimeStrToTime: lambda self, expr: self._annotate_with_type(
+153            expr, exp.DataType.Type.TIMESTAMP
+154        ),
+155        exp.Trim: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+156        exp.TsOrDsToDateStr: lambda self, expr: self._annotate_with_type(
+157            expr, exp.DataType.Type.VARCHAR
+158        ),
+159        exp.TsOrDsToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+160        exp.TsOrDiToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+161        exp.UnixToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+162        exp.UnixToTime: lambda self, expr: self._annotate_with_type(
+163            expr, exp.DataType.Type.TIMESTAMP
+164        ),
+165        exp.UnixToTimeStr: lambda self, expr: self._annotate_with_type(
+166            expr, exp.DataType.Type.VARCHAR
+167        ),
+168        exp.Upper: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+169        exp.Variance: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+170        exp.VariancePop: lambda self, expr: self._annotate_with_type(
+171            expr, exp.DataType.Type.DOUBLE
+172        ),
+173        exp.Week: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+174        exp.Year: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+175    }
+176
+177    # Reference: https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html
+178    COERCES_TO = {
+179        # CHAR < NCHAR < VARCHAR < NVARCHAR < TEXT
+180        exp.DataType.Type.TEXT: set(),
+181        exp.DataType.Type.NVARCHAR: {exp.DataType.Type.TEXT},
+182        exp.DataType.Type.VARCHAR: {exp.DataType.Type.NVARCHAR, exp.DataType.Type.TEXT},
+183        exp.DataType.Type.NCHAR: {
+184            exp.DataType.Type.VARCHAR,
+185            exp.DataType.Type.NVARCHAR,
+186            exp.DataType.Type.TEXT,
+187        },
+188        exp.DataType.Type.CHAR: {
+189            exp.DataType.Type.NCHAR,
+190            exp.DataType.Type.VARCHAR,
+191            exp.DataType.Type.NVARCHAR,
+192            exp.DataType.Type.TEXT,
+193        },
+194        # TINYINT < SMALLINT < INT < BIGINT < DECIMAL < FLOAT < DOUBLE
+195        exp.DataType.Type.DOUBLE: set(),
+196        exp.DataType.Type.FLOAT: {exp.DataType.Type.DOUBLE},
+197        exp.DataType.Type.DECIMAL: {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE},
+198        exp.DataType.Type.BIGINT: {
+199            exp.DataType.Type.DECIMAL,
+200            exp.DataType.Type.FLOAT,
+201            exp.DataType.Type.DOUBLE,
+202        },
+203        exp.DataType.Type.INT: {
+204            exp.DataType.Type.BIGINT,
+205            exp.DataType.Type.DECIMAL,
+206            exp.DataType.Type.FLOAT,
+207            exp.DataType.Type.DOUBLE,
+208        },
+209        exp.DataType.Type.SMALLINT: {
+210            exp.DataType.Type.INT,
+211            exp.DataType.Type.BIGINT,
+212            exp.DataType.Type.DECIMAL,
+213            exp.DataType.Type.FLOAT,
+214            exp.DataType.Type.DOUBLE,
+215        },
+216        exp.DataType.Type.TINYINT: {
+217            exp.DataType.Type.SMALLINT,
+218            exp.DataType.Type.INT,
+219            exp.DataType.Type.BIGINT,
+220            exp.DataType.Type.DECIMAL,
+221            exp.DataType.Type.FLOAT,
+222            exp.DataType.Type.DOUBLE,
+223        },
+224        # DATE < DATETIME < TIMESTAMP < TIMESTAMPTZ < TIMESTAMPLTZ
+225        exp.DataType.Type.TIMESTAMPLTZ: set(),
+226        exp.DataType.Type.TIMESTAMPTZ: {exp.DataType.Type.TIMESTAMPLTZ},
+227        exp.DataType.Type.TIMESTAMP: {
+228            exp.DataType.Type.TIMESTAMPTZ,
+229            exp.DataType.Type.TIMESTAMPLTZ,
+230        },
+231        exp.DataType.Type.DATETIME: {
+232            exp.DataType.Type.TIMESTAMP,
+233            exp.DataType.Type.TIMESTAMPTZ,
+234            exp.DataType.Type.TIMESTAMPLTZ,
+235        },
+236        exp.DataType.Type.DATE: {
+237            exp.DataType.Type.DATETIME,
+238            exp.DataType.Type.TIMESTAMP,
+239            exp.DataType.Type.TIMESTAMPTZ,
+240            exp.DataType.Type.TIMESTAMPLTZ,
+241        },
+242    }
+243
+244    TRAVERSABLES = (exp.Select, exp.Union, exp.UDTF, exp.Subquery)
+245
+246    def __init__(self, schema=None, annotators=None, coerces_to=None):
+247        self.schema = schema
+248        self.annotators = annotators or self.ANNOTATORS
+249        self.coerces_to = coerces_to or self.COERCES_TO
+250
+251    def annotate(self, expression):
+252        if isinstance(expression, self.TRAVERSABLES):
+253            for scope in traverse_scope(expression):
+254                selects = {}
+255                for name, source in scope.sources.items():
+256                    if not isinstance(source, Scope):
+257                        continue
+258                    if isinstance(source.expression, exp.UDTF):
+259                        values = []
+260
+261                        if isinstance(source.expression, exp.Lateral):
+262                            if isinstance(source.expression.this, exp.Explode):
+263                                values = [source.expression.this.this]
+264                        else:
+265                            values = source.expression.expressions[0].expressions
+266
+267                        if not values:
+268                            continue
+269
+270                        selects[name] = {
+271                            alias: column
+272                            for alias, column in zip(
+273                                source.expression.alias_column_names,
+274                                values,
+275                            )
+276                        }
+277                    else:
+278                        selects[name] = {
+279                            select.alias_or_name: select for select in source.expression.selects
+280                        }
+281                # First annotate the current scope's column references
+282                for col in scope.columns:
+283                    source = scope.sources.get(col.table)
+284                    if isinstance(source, exp.Table):
+285                        col.type = self.schema.get_column_type(source, col)
+286                    elif source and col.table in selects:
+287                        col.type = selects[col.table][col.name].type
+288                # Then (possibly) annotate the remaining expressions in the scope
+289                self._maybe_annotate(scope.expression)
+290        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
+291
+292    def _maybe_annotate(self, expression):
+293        if not isinstance(expression, exp.Expression):
+294            return None
+295
+296        if expression.type:
+297            return expression  # We've already inferred the expression's type
+298
+299        annotator = self.annotators.get(expression.__class__)
+300
+301        return (
+302            annotator(self, expression)
+303            if annotator
+304            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
+305        )
+306
+307    def _annotate_args(self, expression):
+308        for value in expression.args.values():
+309            for v in ensure_collection(value):
+310                self._maybe_annotate(v)
+311
+312        return expression
+313
+314    def _maybe_coerce(self, type1, type2):
+315        # We propagate the NULL / UNKNOWN types upwards if found
+316        if isinstance(type1, exp.DataType):
+317            type1 = type1.this
+318        if isinstance(type2, exp.DataType):
+319            type2 = type2.this
+320
+321        if exp.DataType.Type.NULL in (type1, type2):
+322            return exp.DataType.Type.NULL
+323        if exp.DataType.Type.UNKNOWN in (type1, type2):
+324            return exp.DataType.Type.UNKNOWN
+325
+326        return type2 if type2 in self.coerces_to.get(type1, {}) else type1
+327
+328    def _annotate_binary(self, expression):
+329        self._annotate_args(expression)
+330
+331        left_type = expression.left.type.this
+332        right_type = expression.right.type.this
+333
+334        if isinstance(expression, (exp.And, exp.Or)):
+335            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
+336                expression.type = exp.DataType.Type.NULL
+337            elif exp.DataType.Type.NULL in (left_type, right_type):
+338                expression.type = exp.DataType.build(
+339                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
+340                )
+341            else:
+342                expression.type = exp.DataType.Type.BOOLEAN
+343        elif isinstance(expression, (exp.Condition, exp.Predicate)):
+344            expression.type = exp.DataType.Type.BOOLEAN
+345        else:
+346            expression.type = self._maybe_coerce(left_type, right_type)
+347
+348        return expression
+349
+350    def _annotate_unary(self, expression):
+351        self._annotate_args(expression)
+352
+353        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
+354            expression.type = exp.DataType.Type.BOOLEAN
+355        else:
+356            expression.type = expression.this.type
+357
+358        return expression
+359
+360    def _annotate_literal(self, expression):
+361        if expression.is_string:
+362            expression.type = exp.DataType.Type.VARCHAR
+363        elif expression.is_int:
+364            expression.type = exp.DataType.Type.INT
+365        else:
+366            expression.type = exp.DataType.Type.DOUBLE
+367
+368        return expression
+369
+370    def _annotate_with_type(self, expression, target_type):
+371        expression.type = target_type
+372        return self._annotate_args(expression)
+373
+374    def _annotate_by_args(self, expression, *args, promote=False):
+375        self._annotate_args(expression)
+376        expressions = []
+377        for arg in args:
+378            arg_expr = expression.args.get(arg)
+379            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
+380
+381        last_datatype = None
+382        for expr in expressions:
+383            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
+384
+385        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
+386
+387        if promote:
+388            if expression.type.this in exp.DataType.INTEGER_TYPES:
+389                expression.type = exp.DataType.Type.BIGINT
+390            elif expression.type.this in exp.DataType.FLOAT_TYPES:
+391                expression.type = exp.DataType.Type.DOUBLE
+392
+393        return expression
+
+ + +
+
+ +
+ + def + annotate_types(expression, schema=None, annotators=None, coerces_to=None): + + + +
+ +
 8def annotate_types(expression, schema=None, annotators=None, coerces_to=None):
+ 9    """
+10    Recursively infer & annotate types in an expression syntax tree against a schema.
+11    Assumes that we've already executed the optimizer's qualify_columns step.
+12
+13    Example:
+14        >>> import sqlglot
+15        >>> schema = {"y": {"cola": "SMALLINT"}}
+16        >>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
+17        >>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
+18        >>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
+19        <Type.DOUBLE: 'DOUBLE'>
+20
+21    Args:
+22        expression (sqlglot.Expression): Expression to annotate.
+23        schema (dict|sqlglot.optimizer.Schema): Database schema.
+24        annotators (dict): Maps expression type to corresponding annotation function.
+25        coerces_to (dict): Maps expression type to set of types that it can be coerced into.
+26    Returns:
+27        sqlglot.Expression: expression annotated with types
+28    """
+29
+30    schema = ensure_schema(schema)
+31
+32    return TypeAnnotator(schema, annotators, coerces_to).annotate(expression)
+
+ + +

Recursively infer & annotate types in an expression syntax tree against a schema. +Assumes that we've already executed the optimizer's qualify_columns step.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> schema = {"y": {"cola": "SMALLINT"}}
+>>> sql = "SELECT x.cola + 2.5 AS cola FROM (SELECT y.cola AS cola FROM y AS y) AS x"
+>>> annotated_expr = annotate_types(sqlglot.parse_one(sql), schema=schema)
+>>> annotated_expr.expressions[0].type.this  # Get the type of "x.cola + 2.5 AS cola"
+<Type.DOUBLE: 'DOUBLE'>
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): Expression to annotate.
  • +
  • schema (dict|sqlglot.optimizer.Schema): Database schema.
  • +
  • annotators (dict): Maps expression type to corresponding annotation function.
  • +
  • coerces_to (dict): Maps expression type to set of types that it can be coerced into.
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: expression annotated with types

+
+
+ + +
+
+ +
+ + class + TypeAnnotator: + + + +
+ +
 35class TypeAnnotator:
+ 36    ANNOTATORS = {
+ 37        **{
+ 38            expr_type: lambda self, expr: self._annotate_unary(expr)
+ 39            for expr_type in subclasses(exp.__name__, exp.Unary)
+ 40        },
+ 41        **{
+ 42            expr_type: lambda self, expr: self._annotate_binary(expr)
+ 43            for expr_type in subclasses(exp.__name__, exp.Binary)
+ 44        },
+ 45        exp.Cast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
+ 46        exp.TryCast: lambda self, expr: self._annotate_with_type(expr, expr.args["to"]),
+ 47        exp.DataType: lambda self, expr: self._annotate_with_type(expr, expr.copy()),
+ 48        exp.Alias: lambda self, expr: self._annotate_unary(expr),
+ 49        exp.Between: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 50        exp.In: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 51        exp.Literal: lambda self, expr: self._annotate_literal(expr),
+ 52        exp.Boolean: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BOOLEAN),
+ 53        exp.Null: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.NULL),
+ 54        exp.Anonymous: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.UNKNOWN),
+ 55        exp.ApproxDistinct: lambda self, expr: self._annotate_with_type(
+ 56            expr, exp.DataType.Type.BIGINT
+ 57        ),
+ 58        exp.Avg: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+ 59        exp.Min: lambda self, expr: self._annotate_by_args(expr, "this"),
+ 60        exp.Max: lambda self, expr: self._annotate_by_args(expr, "this"),
+ 61        exp.Sum: lambda self, expr: self._annotate_by_args(expr, "this", promote=True),
+ 62        exp.Ceil: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 63        exp.Count: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
+ 64        exp.CurrentDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 65        exp.CurrentDatetime: lambda self, expr: self._annotate_with_type(
+ 66            expr, exp.DataType.Type.DATETIME
+ 67        ),
+ 68        exp.CurrentTime: lambda self, expr: self._annotate_with_type(
+ 69            expr, exp.DataType.Type.TIMESTAMP
+ 70        ),
+ 71        exp.CurrentTimestamp: lambda self, expr: self._annotate_with_type(
+ 72            expr, exp.DataType.Type.TIMESTAMP
+ 73        ),
+ 74        exp.DateAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 75        exp.DateSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+ 76        exp.DateDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 77        exp.DatetimeAdd: lambda self, expr: self._annotate_with_type(
+ 78            expr, exp.DataType.Type.DATETIME
+ 79        ),
+ 80        exp.DatetimeSub: lambda self, expr: self._annotate_with_type(
+ 81            expr, exp.DataType.Type.DATETIME
+ 82        ),
+ 83        exp.DatetimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 84        exp.Extract: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 85        exp.TimestampAdd: lambda self, expr: self._annotate_with_type(
+ 86            expr, exp.DataType.Type.TIMESTAMP
+ 87        ),
+ 88        exp.TimestampSub: lambda self, expr: self._annotate_with_type(
+ 89            expr, exp.DataType.Type.TIMESTAMP
+ 90        ),
+ 91        exp.TimestampDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 92        exp.TimeAdd: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
+ 93        exp.TimeSub: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TIMESTAMP),
+ 94        exp.TimeDiff: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+ 95        exp.DateStrToDate: lambda self, expr: self._annotate_with_type(
+ 96            expr, exp.DataType.Type.DATE
+ 97        ),
+ 98        exp.DateToDateStr: lambda self, expr: self._annotate_with_type(
+ 99            expr, exp.DataType.Type.VARCHAR
+100        ),
+101        exp.DateToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+102        exp.Day: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+103        exp.DiToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+104        exp.Exp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+105        exp.Floor: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+106        exp.Case: lambda self, expr: self._annotate_by_args(expr, "default", "ifs"),
+107        exp.If: lambda self, expr: self._annotate_by_args(expr, "true", "false"),
+108        exp.Coalesce: lambda self, expr: self._annotate_by_args(expr, "this", "expressions"),
+109        exp.IfNull: lambda self, expr: self._annotate_by_args(expr, "this", "expression"),
+110        exp.ConcatWs: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+111        exp.GroupConcat: lambda self, expr: self._annotate_with_type(
+112            expr, exp.DataType.Type.VARCHAR
+113        ),
+114        exp.ArrayConcat: lambda self, expr: self._annotate_with_type(
+115            expr, exp.DataType.Type.VARCHAR
+116        ),
+117        exp.Initcap: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+118        exp.Length: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.BIGINT),
+119        exp.Levenshtein: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+120        exp.Ln: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+121        exp.Log: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+122        exp.Log2: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+123        exp.Log10: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+124        exp.Lower: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+125        exp.Month: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+126        exp.Pow: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+127        exp.Quantile: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+128        exp.ApproxQuantile: lambda self, expr: self._annotate_with_type(
+129            expr, exp.DataType.Type.DOUBLE
+130        ),
+131        exp.RegexpLike: lambda self, expr: self._annotate_with_type(
+132            expr, exp.DataType.Type.BOOLEAN
+133        ),
+134        exp.Round: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+135        exp.SafeDivide: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+136        exp.Substring: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+137        exp.StrPosition: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+138        exp.StrToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+139        exp.StrToTime: lambda self, expr: self._annotate_with_type(
+140            expr, exp.DataType.Type.TIMESTAMP
+141        ),
+142        exp.Sqrt: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+143        exp.Stddev: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+144        exp.StddevPop: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+145        exp.StddevSamp: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+146        exp.TimeToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+147        exp.TimeToTimeStr: lambda self, expr: self._annotate_with_type(
+148            expr, exp.DataType.Type.VARCHAR
+149        ),
+150        exp.TimeStrToDate: lambda self, expr: self._annotate_with_type(
+151            expr, exp.DataType.Type.DATE
+152        ),
+153        exp.TimeStrToTime: lambda self, expr: self._annotate_with_type(
+154            expr, exp.DataType.Type.TIMESTAMP
+155        ),
+156        exp.Trim: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+157        exp.TsOrDsToDateStr: lambda self, expr: self._annotate_with_type(
+158            expr, exp.DataType.Type.VARCHAR
+159        ),
+160        exp.TsOrDsToDate: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DATE),
+161        exp.TsOrDiToDi: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.INT),
+162        exp.UnixToStr: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+163        exp.UnixToTime: lambda self, expr: self._annotate_with_type(
+164            expr, exp.DataType.Type.TIMESTAMP
+165        ),
+166        exp.UnixToTimeStr: lambda self, expr: self._annotate_with_type(
+167            expr, exp.DataType.Type.VARCHAR
+168        ),
+169        exp.Upper: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.VARCHAR),
+170        exp.Variance: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.DOUBLE),
+171        exp.VariancePop: lambda self, expr: self._annotate_with_type(
+172            expr, exp.DataType.Type.DOUBLE
+173        ),
+174        exp.Week: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+175        exp.Year: lambda self, expr: self._annotate_with_type(expr, exp.DataType.Type.TINYINT),
+176    }
+177
+178    # Reference: https://spark.apache.org/docs/3.2.0/sql-ref-ansi-compliance.html
+179    COERCES_TO = {
+180        # CHAR < NCHAR < VARCHAR < NVARCHAR < TEXT
+181        exp.DataType.Type.TEXT: set(),
+182        exp.DataType.Type.NVARCHAR: {exp.DataType.Type.TEXT},
+183        exp.DataType.Type.VARCHAR: {exp.DataType.Type.NVARCHAR, exp.DataType.Type.TEXT},
+184        exp.DataType.Type.NCHAR: {
+185            exp.DataType.Type.VARCHAR,
+186            exp.DataType.Type.NVARCHAR,
+187            exp.DataType.Type.TEXT,
+188        },
+189        exp.DataType.Type.CHAR: {
+190            exp.DataType.Type.NCHAR,
+191            exp.DataType.Type.VARCHAR,
+192            exp.DataType.Type.NVARCHAR,
+193            exp.DataType.Type.TEXT,
+194        },
+195        # TINYINT < SMALLINT < INT < BIGINT < DECIMAL < FLOAT < DOUBLE
+196        exp.DataType.Type.DOUBLE: set(),
+197        exp.DataType.Type.FLOAT: {exp.DataType.Type.DOUBLE},
+198        exp.DataType.Type.DECIMAL: {exp.DataType.Type.FLOAT, exp.DataType.Type.DOUBLE},
+199        exp.DataType.Type.BIGINT: {
+200            exp.DataType.Type.DECIMAL,
+201            exp.DataType.Type.FLOAT,
+202            exp.DataType.Type.DOUBLE,
+203        },
+204        exp.DataType.Type.INT: {
+205            exp.DataType.Type.BIGINT,
+206            exp.DataType.Type.DECIMAL,
+207            exp.DataType.Type.FLOAT,
+208            exp.DataType.Type.DOUBLE,
+209        },
+210        exp.DataType.Type.SMALLINT: {
+211            exp.DataType.Type.INT,
+212            exp.DataType.Type.BIGINT,
+213            exp.DataType.Type.DECIMAL,
+214            exp.DataType.Type.FLOAT,
+215            exp.DataType.Type.DOUBLE,
+216        },
+217        exp.DataType.Type.TINYINT: {
+218            exp.DataType.Type.SMALLINT,
+219            exp.DataType.Type.INT,
+220            exp.DataType.Type.BIGINT,
+221            exp.DataType.Type.DECIMAL,
+222            exp.DataType.Type.FLOAT,
+223            exp.DataType.Type.DOUBLE,
+224        },
+225        # DATE < DATETIME < TIMESTAMP < TIMESTAMPTZ < TIMESTAMPLTZ
+226        exp.DataType.Type.TIMESTAMPLTZ: set(),
+227        exp.DataType.Type.TIMESTAMPTZ: {exp.DataType.Type.TIMESTAMPLTZ},
+228        exp.DataType.Type.TIMESTAMP: {
+229            exp.DataType.Type.TIMESTAMPTZ,
+230            exp.DataType.Type.TIMESTAMPLTZ,
+231        },
+232        exp.DataType.Type.DATETIME: {
+233            exp.DataType.Type.TIMESTAMP,
+234            exp.DataType.Type.TIMESTAMPTZ,
+235            exp.DataType.Type.TIMESTAMPLTZ,
+236        },
+237        exp.DataType.Type.DATE: {
+238            exp.DataType.Type.DATETIME,
+239            exp.DataType.Type.TIMESTAMP,
+240            exp.DataType.Type.TIMESTAMPTZ,
+241            exp.DataType.Type.TIMESTAMPLTZ,
+242        },
+243    }
+244
+245    TRAVERSABLES = (exp.Select, exp.Union, exp.UDTF, exp.Subquery)
+246
+247    def __init__(self, schema=None, annotators=None, coerces_to=None):
+248        self.schema = schema
+249        self.annotators = annotators or self.ANNOTATORS
+250        self.coerces_to = coerces_to or self.COERCES_TO
+251
+252    def annotate(self, expression):
+253        if isinstance(expression, self.TRAVERSABLES):
+254            for scope in traverse_scope(expression):
+255                selects = {}
+256                for name, source in scope.sources.items():
+257                    if not isinstance(source, Scope):
+258                        continue
+259                    if isinstance(source.expression, exp.UDTF):
+260                        values = []
+261
+262                        if isinstance(source.expression, exp.Lateral):
+263                            if isinstance(source.expression.this, exp.Explode):
+264                                values = [source.expression.this.this]
+265                        else:
+266                            values = source.expression.expressions[0].expressions
+267
+268                        if not values:
+269                            continue
+270
+271                        selects[name] = {
+272                            alias: column
+273                            for alias, column in zip(
+274                                source.expression.alias_column_names,
+275                                values,
+276                            )
+277                        }
+278                    else:
+279                        selects[name] = {
+280                            select.alias_or_name: select for select in source.expression.selects
+281                        }
+282                # First annotate the current scope's column references
+283                for col in scope.columns:
+284                    source = scope.sources.get(col.table)
+285                    if isinstance(source, exp.Table):
+286                        col.type = self.schema.get_column_type(source, col)
+287                    elif source and col.table in selects:
+288                        col.type = selects[col.table][col.name].type
+289                # Then (possibly) annotate the remaining expressions in the scope
+290                self._maybe_annotate(scope.expression)
+291        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
+292
+293    def _maybe_annotate(self, expression):
+294        if not isinstance(expression, exp.Expression):
+295            return None
+296
+297        if expression.type:
+298            return expression  # We've already inferred the expression's type
+299
+300        annotator = self.annotators.get(expression.__class__)
+301
+302        return (
+303            annotator(self, expression)
+304            if annotator
+305            else self._annotate_with_type(expression, exp.DataType.Type.UNKNOWN)
+306        )
+307
+308    def _annotate_args(self, expression):
+309        for value in expression.args.values():
+310            for v in ensure_collection(value):
+311                self._maybe_annotate(v)
+312
+313        return expression
+314
+315    def _maybe_coerce(self, type1, type2):
+316        # We propagate the NULL / UNKNOWN types upwards if found
+317        if isinstance(type1, exp.DataType):
+318            type1 = type1.this
+319        if isinstance(type2, exp.DataType):
+320            type2 = type2.this
+321
+322        if exp.DataType.Type.NULL in (type1, type2):
+323            return exp.DataType.Type.NULL
+324        if exp.DataType.Type.UNKNOWN in (type1, type2):
+325            return exp.DataType.Type.UNKNOWN
+326
+327        return type2 if type2 in self.coerces_to.get(type1, {}) else type1
+328
+329    def _annotate_binary(self, expression):
+330        self._annotate_args(expression)
+331
+332        left_type = expression.left.type.this
+333        right_type = expression.right.type.this
+334
+335        if isinstance(expression, (exp.And, exp.Or)):
+336            if left_type == exp.DataType.Type.NULL and right_type == exp.DataType.Type.NULL:
+337                expression.type = exp.DataType.Type.NULL
+338            elif exp.DataType.Type.NULL in (left_type, right_type):
+339                expression.type = exp.DataType.build(
+340                    "NULLABLE", expressions=exp.DataType.build("BOOLEAN")
+341                )
+342            else:
+343                expression.type = exp.DataType.Type.BOOLEAN
+344        elif isinstance(expression, (exp.Condition, exp.Predicate)):
+345            expression.type = exp.DataType.Type.BOOLEAN
+346        else:
+347            expression.type = self._maybe_coerce(left_type, right_type)
+348
+349        return expression
+350
+351    def _annotate_unary(self, expression):
+352        self._annotate_args(expression)
+353
+354        if isinstance(expression, exp.Condition) and not isinstance(expression, exp.Paren):
+355            expression.type = exp.DataType.Type.BOOLEAN
+356        else:
+357            expression.type = expression.this.type
+358
+359        return expression
+360
+361    def _annotate_literal(self, expression):
+362        if expression.is_string:
+363            expression.type = exp.DataType.Type.VARCHAR
+364        elif expression.is_int:
+365            expression.type = exp.DataType.Type.INT
+366        else:
+367            expression.type = exp.DataType.Type.DOUBLE
+368
+369        return expression
+370
+371    def _annotate_with_type(self, expression, target_type):
+372        expression.type = target_type
+373        return self._annotate_args(expression)
+374
+375    def _annotate_by_args(self, expression, *args, promote=False):
+376        self._annotate_args(expression)
+377        expressions = []
+378        for arg in args:
+379            arg_expr = expression.args.get(arg)
+380            expressions.extend(expr for expr in ensure_list(arg_expr) if expr)
+381
+382        last_datatype = None
+383        for expr in expressions:
+384            last_datatype = self._maybe_coerce(last_datatype or expr.type, expr.type)
+385
+386        expression.type = last_datatype or exp.DataType.Type.UNKNOWN
+387
+388        if promote:
+389            if expression.type.this in exp.DataType.INTEGER_TYPES:
+390                expression.type = exp.DataType.Type.BIGINT
+391            elif expression.type.this in exp.DataType.FLOAT_TYPES:
+392                expression.type = exp.DataType.Type.DOUBLE
+393
+394        return expression
+
+ + + + +
+ +
+ + TypeAnnotator(schema=None, annotators=None, coerces_to=None) + + + +
+ +
247    def __init__(self, schema=None, annotators=None, coerces_to=None):
+248        self.schema = schema
+249        self.annotators = annotators or self.ANNOTATORS
+250        self.coerces_to = coerces_to or self.COERCES_TO
+
+ + + + +
+
+ +
+ + def + annotate(self, expression): + + + +
+ +
252    def annotate(self, expression):
+253        if isinstance(expression, self.TRAVERSABLES):
+254            for scope in traverse_scope(expression):
+255                selects = {}
+256                for name, source in scope.sources.items():
+257                    if not isinstance(source, Scope):
+258                        continue
+259                    if isinstance(source.expression, exp.UDTF):
+260                        values = []
+261
+262                        if isinstance(source.expression, exp.Lateral):
+263                            if isinstance(source.expression.this, exp.Explode):
+264                                values = [source.expression.this.this]
+265                        else:
+266                            values = source.expression.expressions[0].expressions
+267
+268                        if not values:
+269                            continue
+270
+271                        selects[name] = {
+272                            alias: column
+273                            for alias, column in zip(
+274                                source.expression.alias_column_names,
+275                                values,
+276                            )
+277                        }
+278                    else:
+279                        selects[name] = {
+280                            select.alias_or_name: select for select in source.expression.selects
+281                        }
+282                # First annotate the current scope's column references
+283                for col in scope.columns:
+284                    source = scope.sources.get(col.table)
+285                    if isinstance(source, exp.Table):
+286                        col.type = self.schema.get_column_type(source, col)
+287                    elif source and col.table in selects:
+288                        col.type = selects[col.table][col.name].type
+289                # Then (possibly) annotate the remaining expressions in the scope
+290                self._maybe_annotate(scope.expression)
+291        return self._maybe_annotate(expression)  # This takes care of non-traversable expressions
+
+ + + + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/canonicalize.html b/docs/sqlglot/optimizer/canonicalize.html new file mode 100644 index 0000000..15e3120 --- /dev/null +++ b/docs/sqlglot/optimizer/canonicalize.html @@ -0,0 +1,445 @@ + + + + + + + sqlglot.optimizer.canonicalize API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.canonicalize

+ + + + + + +
 1import itertools
+ 2
+ 3from sqlglot import exp
+ 4
+ 5
+ 6def canonicalize(expression: exp.Expression) -> exp.Expression:
+ 7    """Converts a sql expression into a standard form.
+ 8
+ 9    This method relies on annotate_types because many of the
+10    conversions rely on type inference.
+11
+12    Args:
+13        expression: The expression to canonicalize.
+14    """
+15    exp.replace_children(expression, canonicalize)
+16
+17    expression = add_text_to_concat(expression)
+18    expression = coerce_type(expression)
+19    expression = remove_redundant_casts(expression)
+20
+21    if isinstance(expression, exp.Identifier):
+22        expression.set("quoted", True)
+23
+24    return expression
+25
+26
+27def add_text_to_concat(node: exp.Expression) -> exp.Expression:
+28    if isinstance(node, exp.Add) and node.type and node.type.this in exp.DataType.TEXT_TYPES:
+29        node = exp.Concat(this=node.this, expression=node.expression)
+30    return node
+31
+32
+33def coerce_type(node: exp.Expression) -> exp.Expression:
+34    if isinstance(node, exp.Binary):
+35        _coerce_date(node.left, node.right)
+36    elif isinstance(node, exp.Between):
+37        _coerce_date(node.this, node.args["low"])
+38    elif isinstance(node, exp.Extract):
+39        if node.expression.type.this not in exp.DataType.TEMPORAL_TYPES:
+40            _replace_cast(node.expression, "datetime")
+41    return node
+42
+43
+44def remove_redundant_casts(expression: exp.Expression) -> exp.Expression:
+45    if (
+46        isinstance(expression, exp.Cast)
+47        and expression.to.type
+48        and expression.this.type
+49        and expression.to.type.this == expression.this.type.this
+50    ):
+51        return expression.this
+52    return expression
+53
+54
+55def _coerce_date(a: exp.Expression, b: exp.Expression) -> None:
+56    for a, b in itertools.permutations([a, b]):
+57        if (
+58            a.type
+59            and a.type.this == exp.DataType.Type.DATE
+60            and b.type
+61            and b.type.this != exp.DataType.Type.DATE
+62        ):
+63            _replace_cast(b, "date")
+64
+65
+66def _replace_cast(node: exp.Expression, to: str) -> None:
+67    data_type = exp.DataType.build(to)
+68    cast = exp.Cast(this=node.copy(), to=data_type)
+69    cast.type = data_type
+70    node.replace(cast)
+
+ + +
+
+ +
+ + def + canonicalize( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
 7def canonicalize(expression: exp.Expression) -> exp.Expression:
+ 8    """Converts a sql expression into a standard form.
+ 9
+10    This method relies on annotate_types because many of the
+11    conversions rely on type inference.
+12
+13    Args:
+14        expression: The expression to canonicalize.
+15    """
+16    exp.replace_children(expression, canonicalize)
+17
+18    expression = add_text_to_concat(expression)
+19    expression = coerce_type(expression)
+20    expression = remove_redundant_casts(expression)
+21
+22    if isinstance(expression, exp.Identifier):
+23        expression.set("quoted", True)
+24
+25    return expression
+
+ + +

Converts a sql expression into a standard form.

+ +

This method relies on annotate_types because many of the +conversions rely on type inference.

+ +
Arguments:
+ +
    +
  • expression: The expression to canonicalize.
  • +
+
+ + +
+
+ +
+ + def + add_text_to_concat(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
28def add_text_to_concat(node: exp.Expression) -> exp.Expression:
+29    if isinstance(node, exp.Add) and node.type and node.type.this in exp.DataType.TEXT_TYPES:
+30        node = exp.Concat(this=node.this, expression=node.expression)
+31    return node
+
+ + + + +
+
+ +
+ + def + coerce_type(node: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
34def coerce_type(node: exp.Expression) -> exp.Expression:
+35    if isinstance(node, exp.Binary):
+36        _coerce_date(node.left, node.right)
+37    elif isinstance(node, exp.Between):
+38        _coerce_date(node.this, node.args["low"])
+39    elif isinstance(node, exp.Extract):
+40        if node.expression.type.this not in exp.DataType.TEMPORAL_TYPES:
+41            _replace_cast(node.expression, "datetime")
+42    return node
+
+ + + + +
+
+ +
+ + def + remove_redundant_casts( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
45def remove_redundant_casts(expression: exp.Expression) -> exp.Expression:
+46    if (
+47        isinstance(expression, exp.Cast)
+48        and expression.to.type
+49        and expression.this.type
+50        and expression.to.type.this == expression.this.type.this
+51    ):
+52        return expression.this
+53    return expression
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/eliminate_ctes.html b/docs/sqlglot/optimizer/eliminate_ctes.html new file mode 100644 index 0000000..2cad407 --- /dev/null +++ b/docs/sqlglot/optimizer/eliminate_ctes.html @@ -0,0 +1,371 @@ + + + + + + + sqlglot.optimizer.eliminate_ctes API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.eliminate_ctes

+ + + + + + +
 1from sqlglot.optimizer.scope import Scope, build_scope
+ 2
+ 3
+ 4def eliminate_ctes(expression):
+ 5    """
+ 6    Remove unused CTEs from an expression.
+ 7
+ 8    Example:
+ 9        >>> import sqlglot
+10        >>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"
+11        >>> expression = sqlglot.parse_one(sql)
+12        >>> eliminate_ctes(expression).sql()
+13        'SELECT a FROM z'
+14
+15    Args:
+16        expression (sqlglot.Expression): expression to optimize
+17    Returns:
+18        sqlglot.Expression: optimized expression
+19    """
+20    root = build_scope(expression)
+21
+22    ref_count = root.ref_count()
+23
+24    # Traverse the scope tree in reverse so we can remove chains of unused CTEs
+25    for scope in reversed(list(root.traverse())):
+26        if scope.is_cte:
+27            count = ref_count[id(scope)]
+28            if count <= 0:
+29                cte_node = scope.expression.parent
+30                with_node = cte_node.parent
+31                cte_node.pop()
+32
+33                # Pop the entire WITH clause if this is the last CTE
+34                if len(with_node.expressions) <= 0:
+35                    with_node.pop()
+36
+37                # Decrement the ref count for all sources this CTE selects from
+38                for _, source in scope.selected_sources.values():
+39                    if isinstance(source, Scope):
+40                        ref_count[id(source)] -= 1
+41
+42    return expression
+
+ + +
+
+ +
+ + def + eliminate_ctes(expression): + + + +
+ +
 5def eliminate_ctes(expression):
+ 6    """
+ 7    Remove unused CTEs from an expression.
+ 8
+ 9    Example:
+10        >>> import sqlglot
+11        >>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"
+12        >>> expression = sqlglot.parse_one(sql)
+13        >>> eliminate_ctes(expression).sql()
+14        'SELECT a FROM z'
+15
+16    Args:
+17        expression (sqlglot.Expression): expression to optimize
+18    Returns:
+19        sqlglot.Expression: optimized expression
+20    """
+21    root = build_scope(expression)
+22
+23    ref_count = root.ref_count()
+24
+25    # Traverse the scope tree in reverse so we can remove chains of unused CTEs
+26    for scope in reversed(list(root.traverse())):
+27        if scope.is_cte:
+28            count = ref_count[id(scope)]
+29            if count <= 0:
+30                cte_node = scope.expression.parent
+31                with_node = cte_node.parent
+32                cte_node.pop()
+33
+34                # Pop the entire WITH clause if this is the last CTE
+35                if len(with_node.expressions) <= 0:
+36                    with_node.pop()
+37
+38                # Decrement the ref count for all sources this CTE selects from
+39                for _, source in scope.selected_sources.values():
+40                    if isinstance(source, Scope):
+41                        ref_count[id(source)] -= 1
+42
+43    return expression
+
+ + +

Remove unused CTEs from an expression.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sql = "WITH y AS (SELECT a FROM x) SELECT a FROM z"
+>>> expression = sqlglot.parse_one(sql)
+>>> eliminate_ctes(expression).sql()
+'SELECT a FROM z'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/eliminate_joins.html b/docs/sqlglot/optimizer/eliminate_joins.html new file mode 100644 index 0000000..1696908 --- /dev/null +++ b/docs/sqlglot/optimizer/eliminate_joins.html @@ -0,0 +1,610 @@ + + + + + + + sqlglot.optimizer.eliminate_joins API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.eliminate_joins

+ + + + + + +
  1from sqlglot import expressions as exp
+  2from sqlglot.optimizer.normalize import normalized
+  3from sqlglot.optimizer.scope import Scope, traverse_scope
+  4from sqlglot.optimizer.simplify import simplify
+  5
+  6
+  7def eliminate_joins(expression):
+  8    """
+  9    Remove unused joins from an expression.
+ 10
+ 11    This only removes joins when we know that the join condition doesn't produce duplicate rows.
+ 12
+ 13    Example:
+ 14        >>> import sqlglot
+ 15        >>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"
+ 16        >>> expression = sqlglot.parse_one(sql)
+ 17        >>> eliminate_joins(expression).sql()
+ 18        'SELECT x.a FROM x'
+ 19
+ 20    Args:
+ 21        expression (sqlglot.Expression): expression to optimize
+ 22    Returns:
+ 23        sqlglot.Expression: optimized expression
+ 24    """
+ 25    for scope in traverse_scope(expression):
+ 26        # If any columns in this scope aren't qualified, it's hard to determine if a join isn't used.
+ 27        # It's probably possible to infer this from the outputs of derived tables.
+ 28        # But for now, let's just skip this rule.
+ 29        if scope.unqualified_columns:
+ 30            continue
+ 31
+ 32        joins = scope.expression.args.get("joins", [])
+ 33
+ 34        # Reverse the joins so we can remove chains of unused joins
+ 35        for join in reversed(joins):
+ 36            alias = join.this.alias_or_name
+ 37            if _should_eliminate_join(scope, join, alias):
+ 38                join.pop()
+ 39                scope.remove_source(alias)
+ 40    return expression
+ 41
+ 42
+ 43def _should_eliminate_join(scope, join, alias):
+ 44    inner_source = scope.sources.get(alias)
+ 45    return (
+ 46        isinstance(inner_source, Scope)
+ 47        and not _join_is_used(scope, join, alias)
+ 48        and (
+ 49            (join.side == "LEFT" and _is_joined_on_all_unique_outputs(inner_source, join))
+ 50            or (not join.args.get("on") and _has_single_output_row(inner_source))
+ 51        )
+ 52    )
+ 53
+ 54
+ 55def _join_is_used(scope, join, alias):
+ 56    # We need to find all columns that reference this join.
+ 57    # But columns in the ON clause shouldn't count.
+ 58    on = join.args.get("on")
+ 59    if on:
+ 60        on_clause_columns = {id(column) for column in on.find_all(exp.Column)}
+ 61    else:
+ 62        on_clause_columns = set()
+ 63    return any(
+ 64        column for column in scope.source_columns(alias) if id(column) not in on_clause_columns
+ 65    )
+ 66
+ 67
+ 68def _is_joined_on_all_unique_outputs(scope, join):
+ 69    unique_outputs = _unique_outputs(scope)
+ 70    if not unique_outputs:
+ 71        return False
+ 72
+ 73    _, join_keys, _ = join_condition(join)
+ 74    remaining_unique_outputs = unique_outputs - {c.name for c in join_keys}
+ 75    return not remaining_unique_outputs
+ 76
+ 77
+ 78def _unique_outputs(scope):
+ 79    """Determine output columns of `scope` that must have a unique combination per row"""
+ 80    if scope.expression.args.get("distinct"):
+ 81        return set(scope.expression.named_selects)
+ 82
+ 83    group = scope.expression.args.get("group")
+ 84    if group:
+ 85        grouped_expressions = set(group.expressions)
+ 86        grouped_outputs = set()
+ 87
+ 88        unique_outputs = set()
+ 89        for select in scope.selects:
+ 90            output = select.unalias()
+ 91            if output in grouped_expressions:
+ 92                grouped_outputs.add(output)
+ 93                unique_outputs.add(select.alias_or_name)
+ 94
+ 95        # All the grouped expressions must be in the output
+ 96        if not grouped_expressions.difference(grouped_outputs):
+ 97            return unique_outputs
+ 98        else:
+ 99            return set()
+100
+101    if _has_single_output_row(scope):
+102        return set(scope.expression.named_selects)
+103
+104    return set()
+105
+106
+107def _has_single_output_row(scope):
+108    return isinstance(scope.expression, exp.Select) and (
+109        all(isinstance(e.unalias(), exp.AggFunc) for e in scope.selects)
+110        or _is_limit_1(scope)
+111        or not scope.expression.args.get("from")
+112    )
+113
+114
+115def _is_limit_1(scope):
+116    limit = scope.expression.args.get("limit")
+117    return limit and limit.expression.this == "1"
+118
+119
+120def join_condition(join):
+121    """
+122    Extract the join condition from a join expression.
+123
+124    Args:
+125        join (exp.Join)
+126    Returns:
+127        tuple[list[str], list[str], exp.Expression]:
+128            Tuple of (source key, join key, remaining predicate)
+129    """
+130    name = join.this.alias_or_name
+131    on = (join.args.get("on") or exp.true()).copy()
+132    source_key = []
+133    join_key = []
+134
+135    def extract_condition(condition):
+136        left, right = condition.unnest_operands()
+137        left_tables = exp.column_table_names(left)
+138        right_tables = exp.column_table_names(right)
+139
+140        if name in left_tables and name not in right_tables:
+141            join_key.append(left)
+142            source_key.append(right)
+143            condition.replace(exp.true())
+144        elif name in right_tables and name not in left_tables:
+145            join_key.append(right)
+146            source_key.append(left)
+147            condition.replace(exp.true())
+148
+149    # find the join keys
+150    # SELECT
+151    # FROM x
+152    # JOIN y
+153    #   ON x.a = y.b AND y.b > 1
+154    #
+155    # should pull y.b as the join key and x.a as the source key
+156    if normalized(on):
+157        on = on if isinstance(on, exp.And) else exp.and_(on, exp.true())
+158
+159        for condition in on.flatten():
+160            if isinstance(condition, exp.EQ):
+161                extract_condition(condition)
+162    elif normalized(on, dnf=True):
+163        conditions = None
+164
+165        for condition in on.flatten():
+166            parts = [part for part in condition.flatten() if isinstance(part, exp.EQ)]
+167            if conditions is None:
+168                conditions = parts
+169            else:
+170                temp = []
+171                for p in parts:
+172                    cs = [c for c in conditions if p == c]
+173
+174                    if cs:
+175                        temp.append(p)
+176                        temp.extend(cs)
+177                conditions = temp
+178
+179        for condition in conditions:
+180            extract_condition(condition)
+181
+182    on = simplify(on)
+183    remaining_condition = None if on == exp.true() else on
+184    return source_key, join_key, remaining_condition
+
+ + +
+
+ +
+ + def + eliminate_joins(expression): + + + +
+ +
 8def eliminate_joins(expression):
+ 9    """
+10    Remove unused joins from an expression.
+11
+12    This only removes joins when we know that the join condition doesn't produce duplicate rows.
+13
+14    Example:
+15        >>> import sqlglot
+16        >>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"
+17        >>> expression = sqlglot.parse_one(sql)
+18        >>> eliminate_joins(expression).sql()
+19        'SELECT x.a FROM x'
+20
+21    Args:
+22        expression (sqlglot.Expression): expression to optimize
+23    Returns:
+24        sqlglot.Expression: optimized expression
+25    """
+26    for scope in traverse_scope(expression):
+27        # If any columns in this scope aren't qualified, it's hard to determine if a join isn't used.
+28        # It's probably possible to infer this from the outputs of derived tables.
+29        # But for now, let's just skip this rule.
+30        if scope.unqualified_columns:
+31            continue
+32
+33        joins = scope.expression.args.get("joins", [])
+34
+35        # Reverse the joins so we can remove chains of unused joins
+36        for join in reversed(joins):
+37            alias = join.this.alias_or_name
+38            if _should_eliminate_join(scope, join, alias):
+39                join.pop()
+40                scope.remove_source(alias)
+41    return expression
+
+ + +

Remove unused joins from an expression.

+ +

This only removes joins when we know that the join condition doesn't produce duplicate rows.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sql = "SELECT x.a FROM x LEFT JOIN (SELECT DISTINCT y.b FROM y) AS y ON x.b = y.b"
+>>> expression = sqlglot.parse_one(sql)
+>>> eliminate_joins(expression).sql()
+'SELECT x.a FROM x'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ +
+ + def + join_condition(join): + + + +
+ +
121def join_condition(join):
+122    """
+123    Extract the join condition from a join expression.
+124
+125    Args:
+126        join (exp.Join)
+127    Returns:
+128        tuple[list[str], list[str], exp.Expression]:
+129            Tuple of (source key, join key, remaining predicate)
+130    """
+131    name = join.this.alias_or_name
+132    on = (join.args.get("on") or exp.true()).copy()
+133    source_key = []
+134    join_key = []
+135
+136    def extract_condition(condition):
+137        left, right = condition.unnest_operands()
+138        left_tables = exp.column_table_names(left)
+139        right_tables = exp.column_table_names(right)
+140
+141        if name in left_tables and name not in right_tables:
+142            join_key.append(left)
+143            source_key.append(right)
+144            condition.replace(exp.true())
+145        elif name in right_tables and name not in left_tables:
+146            join_key.append(right)
+147            source_key.append(left)
+148            condition.replace(exp.true())
+149
+150    # find the join keys
+151    # SELECT
+152    # FROM x
+153    # JOIN y
+154    #   ON x.a = y.b AND y.b > 1
+155    #
+156    # should pull y.b as the join key and x.a as the source key
+157    if normalized(on):
+158        on = on if isinstance(on, exp.And) else exp.and_(on, exp.true())
+159
+160        for condition in on.flatten():
+161            if isinstance(condition, exp.EQ):
+162                extract_condition(condition)
+163    elif normalized(on, dnf=True):
+164        conditions = None
+165
+166        for condition in on.flatten():
+167            parts = [part for part in condition.flatten() if isinstance(part, exp.EQ)]
+168            if conditions is None:
+169                conditions = parts
+170            else:
+171                temp = []
+172                for p in parts:
+173                    cs = [c for c in conditions if p == c]
+174
+175                    if cs:
+176                        temp.append(p)
+177                        temp.extend(cs)
+178                conditions = temp
+179
+180        for condition in conditions:
+181            extract_condition(condition)
+182
+183    on = simplify(on)
+184    remaining_condition = None if on == exp.true() else on
+185    return source_key, join_key, remaining_condition
+
+ + +

Extract the join condition from a join expression.

+ +
Arguments:
+ +
    +
  • join (exp.Join)
  • +
+ +
Returns:
+ +
+

tuple[list[str], list[str], exp.Expression]: + Tuple of (source key, join key, remaining predicate)

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/eliminate_subqueries.html b/docs/sqlglot/optimizer/eliminate_subqueries.html new file mode 100644 index 0000000..34e38b8 --- /dev/null +++ b/docs/sqlglot/optimizer/eliminate_subqueries.html @@ -0,0 +1,582 @@ + + + + + + + sqlglot.optimizer.eliminate_subqueries API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.eliminate_subqueries

+ + + + + + +
  1import itertools
+  2
+  3from sqlglot import expressions as exp
+  4from sqlglot.helper import find_new_name
+  5from sqlglot.optimizer.scope import build_scope
+  6from sqlglot.optimizer.simplify import simplify
+  7
+  8
+  9def eliminate_subqueries(expression):
+ 10    """
+ 11    Rewrite derived tables as CTES, deduplicating if possible.
+ 12
+ 13    Example:
+ 14        >>> import sqlglot
+ 15        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")
+ 16        >>> eliminate_subqueries(expression).sql()
+ 17        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'
+ 18
+ 19    This also deduplicates common subqueries:
+ 20        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y JOIN (SELECT * FROM x) AS z")
+ 21        >>> eliminate_subqueries(expression).sql()
+ 22        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y JOIN y AS z'
+ 23
+ 24    Args:
+ 25        expression (sqlglot.Expression): expression
+ 26    Returns:
+ 27        sqlglot.Expression: expression
+ 28    """
+ 29    if isinstance(expression, exp.Subquery):
+ 30        # It's possible to have subqueries at the root, e.g. (SELECT * FROM x) LIMIT 1
+ 31        eliminate_subqueries(expression.this)
+ 32        return expression
+ 33
+ 34    expression = simplify(expression)
+ 35    root = build_scope(expression)
+ 36
+ 37    # Map of alias->Scope|Table
+ 38    # These are all aliases that are already used in the expression.
+ 39    # We don't want to create new CTEs that conflict with these names.
+ 40    taken = {}
+ 41
+ 42    # All CTE aliases in the root scope are taken
+ 43    for scope in root.cte_scopes:
+ 44        taken[scope.expression.parent.alias] = scope
+ 45
+ 46    # All table names are taken
+ 47    for scope in root.traverse():
+ 48        taken.update(
+ 49            {
+ 50                source.name: source
+ 51                for _, source in scope.sources.items()
+ 52                if isinstance(source, exp.Table)
+ 53            }
+ 54        )
+ 55
+ 56    # Map of Expression->alias
+ 57    # Existing CTES in the root expression. We'll use this for deduplication.
+ 58    existing_ctes = {}
+ 59
+ 60    with_ = root.expression.args.get("with")
+ 61    recursive = False
+ 62    if with_:
+ 63        recursive = with_.args.get("recursive")
+ 64        for cte in with_.expressions:
+ 65            existing_ctes[cte.this] = cte.alias
+ 66    new_ctes = []
+ 67
+ 68    # We're adding more CTEs, but we want to maintain the DAG order.
+ 69    # Derived tables within an existing CTE need to come before the existing CTE.
+ 70    for cte_scope in root.cte_scopes:
+ 71        # Append all the new CTEs from this existing CTE
+ 72        for scope in cte_scope.traverse():
+ 73            if scope is cte_scope:
+ 74                # Don't try to eliminate this CTE itself
+ 75                continue
+ 76            new_cte = _eliminate(scope, existing_ctes, taken)
+ 77            if new_cte:
+ 78                new_ctes.append(new_cte)
+ 79
+ 80        # Append the existing CTE itself
+ 81        new_ctes.append(cte_scope.expression.parent)
+ 82
+ 83    # Now append the rest
+ 84    for scope in itertools.chain(
+ 85        root.union_scopes, root.subquery_scopes, root.derived_table_scopes
+ 86    ):
+ 87        for child_scope in scope.traverse():
+ 88            new_cte = _eliminate(child_scope, existing_ctes, taken)
+ 89            if new_cte:
+ 90                new_ctes.append(new_cte)
+ 91
+ 92    if new_ctes:
+ 93        expression.set("with", exp.With(expressions=new_ctes, recursive=recursive))
+ 94
+ 95    return expression
+ 96
+ 97
+ 98def _eliminate(scope, existing_ctes, taken):
+ 99    if scope.is_union:
+100        return _eliminate_union(scope, existing_ctes, taken)
+101
+102    if scope.is_derived_table and not isinstance(scope.expression, exp.UDTF):
+103        return _eliminate_derived_table(scope, existing_ctes, taken)
+104
+105    if scope.is_cte:
+106        return _eliminate_cte(scope, existing_ctes, taken)
+107
+108
+109def _eliminate_union(scope, existing_ctes, taken):
+110    duplicate_cte_alias = existing_ctes.get(scope.expression)
+111
+112    alias = duplicate_cte_alias or find_new_name(taken=taken, base="cte")
+113
+114    taken[alias] = scope
+115
+116    # Try to maintain the selections
+117    expressions = scope.selects
+118    selects = [
+119        exp.alias_(exp.column(e.alias_or_name, table=alias), alias=e.alias_or_name)
+120        for e in expressions
+121        if e.alias_or_name
+122    ]
+123    # If not all selections have an alias, just select *
+124    if len(selects) != len(expressions):
+125        selects = ["*"]
+126
+127    scope.expression.replace(exp.select(*selects).from_(exp.alias_(exp.table_(alias), alias=alias)))
+128
+129    if not duplicate_cte_alias:
+130        existing_ctes[scope.expression] = alias
+131        return exp.CTE(
+132            this=scope.expression,
+133            alias=exp.TableAlias(this=exp.to_identifier(alias)),
+134        )
+135
+136
+137def _eliminate_derived_table(scope, existing_ctes, taken):
+138    parent = scope.expression.parent
+139    name, cte = _new_cte(scope, existing_ctes, taken)
+140
+141    table = exp.alias_(exp.table_(name), alias=parent.alias or name)
+142    parent.replace(table)
+143
+144    return cte
+145
+146
+147def _eliminate_cte(scope, existing_ctes, taken):
+148    parent = scope.expression.parent
+149    name, cte = _new_cte(scope, existing_ctes, taken)
+150
+151    with_ = parent.parent
+152    parent.pop()
+153    if not with_.expressions:
+154        with_.pop()
+155
+156    # Rename references to this CTE
+157    for child_scope in scope.parent.traverse():
+158        for table, source in child_scope.selected_sources.values():
+159            if source is scope:
+160                new_table = exp.alias_(exp.table_(name), alias=table.alias_or_name)
+161                table.replace(new_table)
+162
+163    return cte
+164
+165
+166def _new_cte(scope, existing_ctes, taken):
+167    """
+168    Returns:
+169        tuple of (name, cte)
+170        where `name` is a new name for this CTE in the root scope and `cte` is a new CTE instance.
+171        If this CTE duplicates an existing CTE, `cte` will be None.
+172    """
+173    duplicate_cte_alias = existing_ctes.get(scope.expression)
+174    parent = scope.expression.parent
+175    name = parent.alias
+176
+177    if not name:
+178        name = find_new_name(taken=taken, base="cte")
+179
+180    if duplicate_cte_alias:
+181        name = duplicate_cte_alias
+182    elif taken.get(name):
+183        name = find_new_name(taken=taken, base=name)
+184
+185    taken[name] = scope
+186
+187    if not duplicate_cte_alias:
+188        existing_ctes[scope.expression] = name
+189        cte = exp.CTE(
+190            this=scope.expression,
+191            alias=exp.TableAlias(this=exp.to_identifier(name)),
+192        )
+193    else:
+194        cte = None
+195    return name, cte
+
+ + +
+
+ +
+ + def + eliminate_subqueries(expression): + + + +
+ +
10def eliminate_subqueries(expression):
+11    """
+12    Rewrite derived tables as CTES, deduplicating if possible.
+13
+14    Example:
+15        >>> import sqlglot
+16        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")
+17        >>> eliminate_subqueries(expression).sql()
+18        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'
+19
+20    This also deduplicates common subqueries:
+21        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y JOIN (SELECT * FROM x) AS z")
+22        >>> eliminate_subqueries(expression).sql()
+23        'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y JOIN y AS z'
+24
+25    Args:
+26        expression (sqlglot.Expression): expression
+27    Returns:
+28        sqlglot.Expression: expression
+29    """
+30    if isinstance(expression, exp.Subquery):
+31        # It's possible to have subqueries at the root, e.g. (SELECT * FROM x) LIMIT 1
+32        eliminate_subqueries(expression.this)
+33        return expression
+34
+35    expression = simplify(expression)
+36    root = build_scope(expression)
+37
+38    # Map of alias->Scope|Table
+39    # These are all aliases that are already used in the expression.
+40    # We don't want to create new CTEs that conflict with these names.
+41    taken = {}
+42
+43    # All CTE aliases in the root scope are taken
+44    for scope in root.cte_scopes:
+45        taken[scope.expression.parent.alias] = scope
+46
+47    # All table names are taken
+48    for scope in root.traverse():
+49        taken.update(
+50            {
+51                source.name: source
+52                for _, source in scope.sources.items()
+53                if isinstance(source, exp.Table)
+54            }
+55        )
+56
+57    # Map of Expression->alias
+58    # Existing CTES in the root expression. We'll use this for deduplication.
+59    existing_ctes = {}
+60
+61    with_ = root.expression.args.get("with")
+62    recursive = False
+63    if with_:
+64        recursive = with_.args.get("recursive")
+65        for cte in with_.expressions:
+66            existing_ctes[cte.this] = cte.alias
+67    new_ctes = []
+68
+69    # We're adding more CTEs, but we want to maintain the DAG order.
+70    # Derived tables within an existing CTE need to come before the existing CTE.
+71    for cte_scope in root.cte_scopes:
+72        # Append all the new CTEs from this existing CTE
+73        for scope in cte_scope.traverse():
+74            if scope is cte_scope:
+75                # Don't try to eliminate this CTE itself
+76                continue
+77            new_cte = _eliminate(scope, existing_ctes, taken)
+78            if new_cte:
+79                new_ctes.append(new_cte)
+80
+81        # Append the existing CTE itself
+82        new_ctes.append(cte_scope.expression.parent)
+83
+84    # Now append the rest
+85    for scope in itertools.chain(
+86        root.union_scopes, root.subquery_scopes, root.derived_table_scopes
+87    ):
+88        for child_scope in scope.traverse():
+89            new_cte = _eliminate(child_scope, existing_ctes, taken)
+90            if new_cte:
+91                new_ctes.append(new_cte)
+92
+93    if new_ctes:
+94        expression.set("with", exp.With(expressions=new_ctes, recursive=recursive))
+95
+96    return expression
+
+ + +

Rewrite derived tables as CTES, deduplicating if possible.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y")
+>>> eliminate_subqueries(expression).sql()
+'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y'
+
+
+
+ +
This also deduplicates common subqueries:
+ +
+
+
>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT * FROM x) AS y JOIN (SELECT * FROM x) AS z")
+>>> eliminate_subqueries(expression).sql()
+'WITH y AS (SELECT * FROM x) SELECT a FROM y AS y JOIN y AS z'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/expand_laterals.html b/docs/sqlglot/optimizer/expand_laterals.html new file mode 100644 index 0000000..89944f9 --- /dev/null +++ b/docs/sqlglot/optimizer/expand_laterals.html @@ -0,0 +1,353 @@ + + + + + + + sqlglot.optimizer.expand_laterals API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.expand_laterals

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3import typing as t
+ 4
+ 5from sqlglot import exp
+ 6
+ 7
+ 8def expand_laterals(expression: exp.Expression) -> exp.Expression:
+ 9    """
+10    Expand lateral column alias references.
+11
+12    This assumes `qualify_columns` as already run.
+13
+14    Example:
+15        >>> import sqlglot
+16        >>> sql = "SELECT x.a + 1 AS b, b + 1 AS c FROM x"
+17        >>> expression = sqlglot.parse_one(sql)
+18        >>> expand_laterals(expression).sql()
+19        'SELECT x.a + 1 AS b, x.a + 1 + 1 AS c FROM x'
+20
+21    Args:
+22        expression: expression to optimize
+23    Returns:
+24        optimized expression
+25    """
+26    for select in expression.find_all(exp.Select):
+27        alias_to_expression: t.Dict[str, exp.Expression] = {}
+28        for projection in select.expressions:
+29            for column in projection.find_all(exp.Column):
+30                if not column.table and column.name in alias_to_expression:
+31                    column.replace(alias_to_expression[column.name].copy())
+32                if isinstance(projection, exp.Alias):
+33                    alias_to_expression[projection.alias] = projection.this
+34    return expression
+
+ + +
+
+ +
+ + def + expand_laterals( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
 9def expand_laterals(expression: exp.Expression) -> exp.Expression:
+10    """
+11    Expand lateral column alias references.
+12
+13    This assumes `qualify_columns` as already run.
+14
+15    Example:
+16        >>> import sqlglot
+17        >>> sql = "SELECT x.a + 1 AS b, b + 1 AS c FROM x"
+18        >>> expression = sqlglot.parse_one(sql)
+19        >>> expand_laterals(expression).sql()
+20        'SELECT x.a + 1 AS b, x.a + 1 + 1 AS c FROM x'
+21
+22    Args:
+23        expression: expression to optimize
+24    Returns:
+25        optimized expression
+26    """
+27    for select in expression.find_all(exp.Select):
+28        alias_to_expression: t.Dict[str, exp.Expression] = {}
+29        for projection in select.expressions:
+30            for column in projection.find_all(exp.Column):
+31                if not column.table and column.name in alias_to_expression:
+32                    column.replace(alias_to_expression[column.name].copy())
+33                if isinstance(projection, exp.Alias):
+34                    alias_to_expression[projection.alias] = projection.this
+35    return expression
+
+ + +

Expand lateral column alias references.

+ +

This assumes qualify_columns as already run.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sql = "SELECT x.a + 1 AS b, b + 1 AS c FROM x"
+>>> expression = sqlglot.parse_one(sql)
+>>> expand_laterals(expression).sql()
+'SELECT x.a + 1 AS b, x.a + 1 + 1 AS c FROM x'
+
+
+
+ +
Arguments:
+ +
    +
  • expression: expression to optimize
  • +
+ +
Returns:
+ +
+

optimized expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/expand_multi_table_selects.html b/docs/sqlglot/optimizer/expand_multi_table_selects.html new file mode 100644 index 0000000..d0751a3 --- /dev/null +++ b/docs/sqlglot/optimizer/expand_multi_table_selects.html @@ -0,0 +1,321 @@ + + + + + + + sqlglot.optimizer.expand_multi_table_selects API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.expand_multi_table_selects

+ + + + + + +
 1from sqlglot import exp
+ 2
+ 3
+ 4def expand_multi_table_selects(expression):
+ 5    """
+ 6    Replace multiple FROM expressions with JOINs.
+ 7
+ 8    Example:
+ 9        >>> from sqlglot import parse_one
+10        >>> expand_multi_table_selects(parse_one("SELECT * FROM x, y")).sql()
+11        'SELECT * FROM x CROSS JOIN y'
+12    """
+13    for from_ in expression.find_all(exp.From):
+14        parent = from_.parent
+15
+16        for query in from_.expressions[1:]:
+17            parent.join(
+18                query,
+19                join_type="CROSS",
+20                copy=False,
+21            )
+22            from_.expressions.remove(query)
+23
+24    return expression
+
+ + +
+
+ +
+ + def + expand_multi_table_selects(expression): + + + +
+ +
 5def expand_multi_table_selects(expression):
+ 6    """
+ 7    Replace multiple FROM expressions with JOINs.
+ 8
+ 9    Example:
+10        >>> from sqlglot import parse_one
+11        >>> expand_multi_table_selects(parse_one("SELECT * FROM x, y")).sql()
+12        'SELECT * FROM x CROSS JOIN y'
+13    """
+14    for from_ in expression.find_all(exp.From):
+15        parent = from_.parent
+16
+17        for query in from_.expressions[1:]:
+18            parent.join(
+19                query,
+20                join_type="CROSS",
+21                copy=False,
+22            )
+23            from_.expressions.remove(query)
+24
+25    return expression
+
+ + +

Replace multiple FROM expressions with JOINs.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> expand_multi_table_selects(parse_one("SELECT * FROM x, y")).sql()
+'SELECT * FROM x CROSS JOIN y'
+
+
+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/isolate_table_selects.html b/docs/sqlglot/optimizer/isolate_table_selects.html new file mode 100644 index 0000000..10413e3 --- /dev/null +++ b/docs/sqlglot/optimizer/isolate_table_selects.html @@ -0,0 +1,317 @@ + + + + + + + sqlglot.optimizer.isolate_table_selects API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.isolate_table_selects

+ + + + + + +
 1from sqlglot import alias, exp
+ 2from sqlglot.errors import OptimizeError
+ 3from sqlglot.optimizer.scope import traverse_scope
+ 4from sqlglot.schema import ensure_schema
+ 5
+ 6
+ 7def isolate_table_selects(expression, schema=None):
+ 8    schema = ensure_schema(schema)
+ 9
+10    for scope in traverse_scope(expression):
+11        if len(scope.selected_sources) == 1:
+12            continue
+13
+14        for _, source in scope.selected_sources.values():
+15            if not isinstance(source, exp.Table) or not schema.column_names(source):
+16                continue
+17
+18            if not source.alias:
+19                raise OptimizeError("Tables require an alias. Run qualify_tables optimization.")
+20
+21            source.replace(
+22                exp.select("*")
+23                .from_(
+24                    alias(source.copy(), source.name or source.alias, table=True),
+25                    copy=False,
+26                )
+27                .subquery(source.alias, copy=False)
+28            )
+29
+30    return expression
+
+ + +
+
+ +
+ + def + isolate_table_selects(expression, schema=None): + + + +
+ +
 8def isolate_table_selects(expression, schema=None):
+ 9    schema = ensure_schema(schema)
+10
+11    for scope in traverse_scope(expression):
+12        if len(scope.selected_sources) == 1:
+13            continue
+14
+15        for _, source in scope.selected_sources.values():
+16            if not isinstance(source, exp.Table) or not schema.column_names(source):
+17                continue
+18
+19            if not source.alias:
+20                raise OptimizeError("Tables require an alias. Run qualify_tables optimization.")
+21
+22            source.replace(
+23                exp.select("*")
+24                .from_(
+25                    alias(source.copy(), source.name or source.alias, table=True),
+26                    copy=False,
+27                )
+28                .subquery(source.alias, copy=False)
+29            )
+30
+31    return expression
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/lower_identities.html b/docs/sqlglot/optimizer/lower_identities.html new file mode 100644 index 0000000..cf760a6 --- /dev/null +++ b/docs/sqlglot/optimizer/lower_identities.html @@ -0,0 +1,430 @@ + + + + + + + sqlglot.optimizer.lower_identities API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.lower_identities

+ + + + + + +
 1from sqlglot import exp
+ 2from sqlglot.helper import ensure_collection
+ 3
+ 4
+ 5def lower_identities(expression):
+ 6    """
+ 7    Convert all unquoted identifiers to lower case.
+ 8
+ 9    Assuming the schema is all lower case, this essentially makes identifiers case-insensitive.
+10
+11    Example:
+12        >>> import sqlglot
+13        >>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')
+14        >>> lower_identities(expression).sql()
+15        'SELECT bar.a AS A FROM "Foo".bar'
+16
+17    Args:
+18        expression (sqlglot.Expression): expression to quote
+19    Returns:
+20        sqlglot.Expression: quoted expression
+21    """
+22    # We need to leave the output aliases unchanged, so the selects need special handling
+23    _lower_selects(expression)
+24
+25    # These clauses can reference output aliases and also need special handling
+26    _lower_order(expression)
+27    _lower_having(expression)
+28
+29    # We've already handled these args, so don't traverse into them
+30    traversed = {"expressions", "order", "having"}
+31
+32    if isinstance(expression, exp.Subquery):
+33        # Root subquery, e.g. (SELECT A AS A FROM X) LIMIT 1
+34        lower_identities(expression.this)
+35        traversed |= {"this"}
+36
+37    if isinstance(expression, exp.Union):
+38        # Union, e.g. SELECT A AS A FROM X UNION SELECT A AS A FROM X
+39        lower_identities(expression.left)
+40        lower_identities(expression.right)
+41        traversed |= {"this", "expression"}
+42
+43    for k, v in expression.args.items():
+44        if k in traversed:
+45            continue
+46
+47        for child in ensure_collection(v):
+48            if isinstance(child, exp.Expression):
+49                child.transform(_lower, copy=False)
+50
+51    return expression
+52
+53
+54def _lower_selects(expression):
+55    for e in expression.expressions:
+56        # Leave output aliases as-is
+57        e.unalias().transform(_lower, copy=False)
+58
+59
+60def _lower_order(expression):
+61    order = expression.args.get("order")
+62
+63    if not order:
+64        return
+65
+66    output_aliases = {e.alias for e in expression.expressions if isinstance(e, exp.Alias)}
+67
+68    for ordered in order.expressions:
+69        # Don't lower references to output aliases
+70        if not (
+71            isinstance(ordered.this, exp.Column)
+72            and not ordered.this.table
+73            and ordered.this.name in output_aliases
+74        ):
+75            ordered.transform(_lower, copy=False)
+76
+77
+78def _lower_having(expression):
+79    having = expression.args.get("having")
+80
+81    if not having:
+82        return
+83
+84    # Don't lower references to output aliases
+85    for agg in having.find_all(exp.AggFunc):
+86        agg.transform(_lower, copy=False)
+87
+88
+89def _lower(node):
+90    if isinstance(node, exp.Identifier) and not node.quoted:
+91        node.set("this", node.this.lower())
+92    return node
+
+ + +
+
+ +
+ + def + lower_identities(expression): + + + +
+ +
 6def lower_identities(expression):
+ 7    """
+ 8    Convert all unquoted identifiers to lower case.
+ 9
+10    Assuming the schema is all lower case, this essentially makes identifiers case-insensitive.
+11
+12    Example:
+13        >>> import sqlglot
+14        >>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')
+15        >>> lower_identities(expression).sql()
+16        'SELECT bar.a AS A FROM "Foo".bar'
+17
+18    Args:
+19        expression (sqlglot.Expression): expression to quote
+20    Returns:
+21        sqlglot.Expression: quoted expression
+22    """
+23    # We need to leave the output aliases unchanged, so the selects need special handling
+24    _lower_selects(expression)
+25
+26    # These clauses can reference output aliases and also need special handling
+27    _lower_order(expression)
+28    _lower_having(expression)
+29
+30    # We've already handled these args, so don't traverse into them
+31    traversed = {"expressions", "order", "having"}
+32
+33    if isinstance(expression, exp.Subquery):
+34        # Root subquery, e.g. (SELECT A AS A FROM X) LIMIT 1
+35        lower_identities(expression.this)
+36        traversed |= {"this"}
+37
+38    if isinstance(expression, exp.Union):
+39        # Union, e.g. SELECT A AS A FROM X UNION SELECT A AS A FROM X
+40        lower_identities(expression.left)
+41        lower_identities(expression.right)
+42        traversed |= {"this", "expression"}
+43
+44    for k, v in expression.args.items():
+45        if k in traversed:
+46            continue
+47
+48        for child in ensure_collection(v):
+49            if isinstance(child, exp.Expression):
+50                child.transform(_lower, copy=False)
+51
+52    return expression
+
+ + +

Convert all unquoted identifiers to lower case.

+ +

Assuming the schema is all lower case, this essentially makes identifiers case-insensitive.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one('SELECT Bar.A AS A FROM "Foo".Bar')
+>>> lower_identities(expression).sql()
+'SELECT bar.a AS A FROM "Foo".bar'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to quote
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: quoted expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/merge_subqueries.html b/docs/sqlglot/optimizer/merge_subqueries.html new file mode 100644 index 0000000..9ffb070 --- /dev/null +++ b/docs/sqlglot/optimizer/merge_subqueries.html @@ -0,0 +1,794 @@ + + + + + + + sqlglot.optimizer.merge_subqueries API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.merge_subqueries

+ + + + + + +
  1from collections import defaultdict
+  2
+  3from sqlglot import expressions as exp
+  4from sqlglot.helper import find_new_name
+  5from sqlglot.optimizer.scope import Scope, traverse_scope
+  6from sqlglot.optimizer.simplify import simplify
+  7
+  8
+  9def merge_subqueries(expression, leave_tables_isolated=False):
+ 10    """
+ 11    Rewrite sqlglot AST to merge derived tables into the outer query.
+ 12
+ 13    This also merges CTEs if they are selected from only once.
+ 14
+ 15    Example:
+ 16        >>> import sqlglot
+ 17        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")
+ 18        >>> merge_subqueries(expression).sql()
+ 19        'SELECT x.a FROM x JOIN y'
+ 20
+ 21    If `leave_tables_isolated` is True, this will not merge inner queries into outer
+ 22    queries if it would result in multiple table selects in a single query:
+ 23        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")
+ 24        >>> merge_subqueries(expression, leave_tables_isolated=True).sql()
+ 25        'SELECT a FROM (SELECT x.a FROM x) JOIN y'
+ 26
+ 27    Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html
+ 28
+ 29    Args:
+ 30        expression (sqlglot.Expression): expression to optimize
+ 31        leave_tables_isolated (bool):
+ 32    Returns:
+ 33        sqlglot.Expression: optimized expression
+ 34    """
+ 35    expression = merge_ctes(expression, leave_tables_isolated)
+ 36    expression = merge_derived_tables(expression, leave_tables_isolated)
+ 37    return expression
+ 38
+ 39
+ 40# If a derived table has these Select args, it can't be merged
+ 41UNMERGABLE_ARGS = set(exp.Select.arg_types) - {
+ 42    "expressions",
+ 43    "from",
+ 44    "joins",
+ 45    "where",
+ 46    "order",
+ 47    "hint",
+ 48}
+ 49
+ 50
+ 51def merge_ctes(expression, leave_tables_isolated=False):
+ 52    scopes = traverse_scope(expression)
+ 53
+ 54    # All places where we select from CTEs.
+ 55    # We key on the CTE scope so we can detect CTES that are selected from multiple times.
+ 56    cte_selections = defaultdict(list)
+ 57    for outer_scope in scopes:
+ 58        for table, inner_scope in outer_scope.selected_sources.values():
+ 59            if isinstance(inner_scope, Scope) and inner_scope.is_cte:
+ 60                cte_selections[id(inner_scope)].append(
+ 61                    (
+ 62                        outer_scope,
+ 63                        inner_scope,
+ 64                        table,
+ 65                    )
+ 66                )
+ 67
+ 68    singular_cte_selections = [v[0] for k, v in cte_selections.items() if len(v) == 1]
+ 69    for outer_scope, inner_scope, table in singular_cte_selections:
+ 70        from_or_join = table.find_ancestor(exp.From, exp.Join)
+ 71        if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
+ 72            alias = table.alias_or_name
+ 73            _rename_inner_sources(outer_scope, inner_scope, alias)
+ 74            _merge_from(outer_scope, inner_scope, table, alias)
+ 75            _merge_expressions(outer_scope, inner_scope, alias)
+ 76            _merge_joins(outer_scope, inner_scope, from_or_join)
+ 77            _merge_where(outer_scope, inner_scope, from_or_join)
+ 78            _merge_order(outer_scope, inner_scope)
+ 79            _merge_hints(outer_scope, inner_scope)
+ 80            _pop_cte(inner_scope)
+ 81            outer_scope.clear_cache()
+ 82    return expression
+ 83
+ 84
+ 85def merge_derived_tables(expression, leave_tables_isolated=False):
+ 86    for outer_scope in traverse_scope(expression):
+ 87        for subquery in outer_scope.derived_tables:
+ 88            from_or_join = subquery.find_ancestor(exp.From, exp.Join)
+ 89            alias = subquery.alias_or_name
+ 90            inner_scope = outer_scope.sources[alias]
+ 91            if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
+ 92                _rename_inner_sources(outer_scope, inner_scope, alias)
+ 93                _merge_from(outer_scope, inner_scope, subquery, alias)
+ 94                _merge_expressions(outer_scope, inner_scope, alias)
+ 95                _merge_joins(outer_scope, inner_scope, from_or_join)
+ 96                _merge_where(outer_scope, inner_scope, from_or_join)
+ 97                _merge_order(outer_scope, inner_scope)
+ 98                _merge_hints(outer_scope, inner_scope)
+ 99                outer_scope.clear_cache()
+100    return expression
+101
+102
+103def _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
+104    """
+105    Return True if `inner_select` can be merged into outer query.
+106
+107    Args:
+108        outer_scope (Scope)
+109        inner_scope (Scope)
+110        leave_tables_isolated (bool)
+111        from_or_join (exp.From|exp.Join)
+112    Returns:
+113        bool: True if can be merged
+114    """
+115    inner_select = inner_scope.expression.unnest()
+116
+117    def _is_a_window_expression_in_unmergable_operation():
+118        window_expressions = inner_select.find_all(exp.Window)
+119        window_alias_names = {window.parent.alias_or_name for window in window_expressions}
+120        inner_select_name = inner_select.parent.alias_or_name
+121        unmergable_window_columns = [
+122            column
+123            for column in outer_scope.columns
+124            if column.find_ancestor(
+125                exp.Where, exp.Group, exp.Order, exp.Join, exp.Having, exp.AggFunc
+126            )
+127        ]
+128        window_expressions_in_unmergable = [
+129            column
+130            for column in unmergable_window_columns
+131            if column.table == inner_select_name and column.name in window_alias_names
+132        ]
+133        return any(window_expressions_in_unmergable)
+134
+135    def _outer_select_joins_on_inner_select_join():
+136        """
+137        All columns from the inner select in the ON clause must be from the first FROM table.
+138
+139        That is, this can be merged:
+140            SELECT * FROM x JOIN (SELECT y.a AS a FROM y JOIN z) AS q ON x.a = q.a
+141                                         ^^^           ^
+142        But this can't:
+143            SELECT * FROM x JOIN (SELECT z.a AS a FROM y JOIN z) AS q ON x.a = q.a
+144                                         ^^^                  ^
+145        """
+146        if not isinstance(from_or_join, exp.Join):
+147            return False
+148
+149        alias = from_or_join.this.alias_or_name
+150
+151        on = from_or_join.args.get("on")
+152        if not on:
+153            return False
+154        selections = [c.name for c in on.find_all(exp.Column) if c.table == alias]
+155        inner_from = inner_scope.expression.args.get("from")
+156        if not inner_from:
+157            return False
+158        inner_from_table = inner_from.expressions[0].alias_or_name
+159        inner_projections = {s.alias_or_name: s for s in inner_scope.selects}
+160        return any(
+161            col.table != inner_from_table
+162            for selection in selections
+163            for col in inner_projections[selection].find_all(exp.Column)
+164        )
+165
+166    return (
+167        isinstance(outer_scope.expression, exp.Select)
+168        and isinstance(inner_select, exp.Select)
+169        and not any(inner_select.args.get(arg) for arg in UNMERGABLE_ARGS)
+170        and inner_select.args.get("from")
+171        and not any(e.find(exp.AggFunc, exp.Select) for e in inner_select.expressions)
+172        and not (leave_tables_isolated and len(outer_scope.selected_sources) > 1)
+173        and not (
+174            isinstance(from_or_join, exp.Join)
+175            and inner_select.args.get("where")
+176            and from_or_join.side in {"FULL", "LEFT", "RIGHT"}
+177        )
+178        and not (
+179            isinstance(from_or_join, exp.From)
+180            and inner_select.args.get("where")
+181            and any(
+182                j.side in {"FULL", "RIGHT"} for j in outer_scope.expression.args.get("joins", [])
+183            )
+184        )
+185        and not _outer_select_joins_on_inner_select_join()
+186        and not _is_a_window_expression_in_unmergable_operation()
+187    )
+188
+189
+190def _rename_inner_sources(outer_scope, inner_scope, alias):
+191    """
+192    Renames any sources in the inner query that conflict with names in the outer query.
+193
+194    Args:
+195        outer_scope (sqlglot.optimizer.scope.Scope)
+196        inner_scope (sqlglot.optimizer.scope.Scope)
+197        alias (str)
+198    """
+199    taken = set(outer_scope.selected_sources)
+200    conflicts = taken.intersection(set(inner_scope.selected_sources))
+201    conflicts -= {alias}
+202
+203    for conflict in conflicts:
+204        new_name = find_new_name(taken, conflict)
+205
+206        source, _ = inner_scope.selected_sources[conflict]
+207        new_alias = exp.to_identifier(new_name)
+208
+209        if isinstance(source, exp.Subquery):
+210            source.set("alias", exp.TableAlias(this=new_alias))
+211        elif isinstance(source, exp.Table) and source.alias:
+212            source.set("alias", new_alias)
+213        elif isinstance(source, exp.Table):
+214            source.replace(exp.alias_(source.copy(), new_alias))
+215
+216        for column in inner_scope.source_columns(conflict):
+217            column.set("table", exp.to_identifier(new_name))
+218
+219        inner_scope.rename_source(conflict, new_name)
+220
+221
+222def _merge_from(outer_scope, inner_scope, node_to_replace, alias):
+223    """
+224    Merge FROM clause of inner query into outer query.
+225
+226    Args:
+227        outer_scope (sqlglot.optimizer.scope.Scope)
+228        inner_scope (sqlglot.optimizer.scope.Scope)
+229        node_to_replace (exp.Subquery|exp.Table)
+230        alias (str)
+231    """
+232    new_subquery = inner_scope.expression.args.get("from").expressions[0]
+233    node_to_replace.replace(new_subquery)
+234    for join_hint in outer_scope.join_hints:
+235        tables = join_hint.find_all(exp.Table)
+236        for table in tables:
+237            if table.alias_or_name == node_to_replace.alias_or_name:
+238                table.set("this", exp.to_identifier(new_subquery.alias_or_name))
+239    outer_scope.remove_source(alias)
+240    outer_scope.add_source(
+241        new_subquery.alias_or_name, inner_scope.sources[new_subquery.alias_or_name]
+242    )
+243
+244
+245def _merge_joins(outer_scope, inner_scope, from_or_join):
+246    """
+247    Merge JOIN clauses of inner query into outer query.
+248
+249    Args:
+250        outer_scope (sqlglot.optimizer.scope.Scope)
+251        inner_scope (sqlglot.optimizer.scope.Scope)
+252        from_or_join (exp.From|exp.Join)
+253    """
+254
+255    new_joins = []
+256    comma_joins = inner_scope.expression.args.get("from").expressions[1:]
+257    for subquery in comma_joins:
+258        new_joins.append(exp.Join(this=subquery, kind="CROSS"))
+259        outer_scope.add_source(subquery.alias_or_name, inner_scope.sources[subquery.alias_or_name])
+260
+261    joins = inner_scope.expression.args.get("joins") or []
+262    for join in joins:
+263        new_joins.append(join)
+264        outer_scope.add_source(join.alias_or_name, inner_scope.sources[join.alias_or_name])
+265
+266    if new_joins:
+267        outer_joins = outer_scope.expression.args.get("joins", [])
+268
+269        # Maintain the join order
+270        if isinstance(from_or_join, exp.From):
+271            position = 0
+272        else:
+273            position = outer_joins.index(from_or_join) + 1
+274        outer_joins[position:position] = new_joins
+275
+276        outer_scope.expression.set("joins", outer_joins)
+277
+278
+279def _merge_expressions(outer_scope, inner_scope, alias):
+280    """
+281    Merge projections of inner query into outer query.
+282
+283    Args:
+284        outer_scope (sqlglot.optimizer.scope.Scope)
+285        inner_scope (sqlglot.optimizer.scope.Scope)
+286        alias (str)
+287    """
+288    # Collect all columns that reference the alias of the inner query
+289    outer_columns = defaultdict(list)
+290    for column in outer_scope.columns:
+291        if column.table == alias:
+292            outer_columns[column.name].append(column)
+293
+294    # Replace columns with the projection expression in the inner query
+295    for expression in inner_scope.expression.expressions:
+296        projection_name = expression.alias_or_name
+297        if not projection_name:
+298            continue
+299        columns_to_replace = outer_columns.get(projection_name, [])
+300        for column in columns_to_replace:
+301            column.replace(expression.unalias().copy())
+302
+303
+304def _merge_where(outer_scope, inner_scope, from_or_join):
+305    """
+306    Merge WHERE clause of inner query into outer query.
+307
+308    Args:
+309        outer_scope (sqlglot.optimizer.scope.Scope)
+310        inner_scope (sqlglot.optimizer.scope.Scope)
+311        from_or_join (exp.From|exp.Join)
+312    """
+313    where = inner_scope.expression.args.get("where")
+314    if not where or not where.this:
+315        return
+316
+317    if isinstance(from_or_join, exp.Join):
+318        # Merge predicates from an outer join to the ON clause
+319        from_or_join.on(where.this, copy=False)
+320        from_or_join.set("on", simplify(from_or_join.args.get("on")))
+321    else:
+322        outer_scope.expression.where(where.this, copy=False)
+323        outer_scope.expression.set("where", simplify(outer_scope.expression.args.get("where")))
+324
+325
+326def _merge_order(outer_scope, inner_scope):
+327    """
+328    Merge ORDER clause of inner query into outer query.
+329
+330    Args:
+331        outer_scope (sqlglot.optimizer.scope.Scope)
+332        inner_scope (sqlglot.optimizer.scope.Scope)
+333    """
+334    if (
+335        any(
+336            outer_scope.expression.args.get(arg) for arg in ["group", "distinct", "having", "order"]
+337        )
+338        or len(outer_scope.selected_sources) != 1
+339        or any(expression.find(exp.AggFunc) for expression in outer_scope.expression.expressions)
+340    ):
+341        return
+342
+343    outer_scope.expression.set("order", inner_scope.expression.args.get("order"))
+344
+345
+346def _merge_hints(outer_scope, inner_scope):
+347    inner_scope_hint = inner_scope.expression.args.get("hint")
+348    if not inner_scope_hint:
+349        return
+350    outer_scope_hint = outer_scope.expression.args.get("hint")
+351    if outer_scope_hint:
+352        for hint_expression in inner_scope_hint.expressions:
+353            outer_scope_hint.append("expressions", hint_expression)
+354    else:
+355        outer_scope.expression.set("hint", inner_scope_hint)
+356
+357
+358def _pop_cte(inner_scope):
+359    """
+360    Remove CTE from the AST.
+361
+362    Args:
+363        inner_scope (sqlglot.optimizer.scope.Scope)
+364    """
+365    cte = inner_scope.expression.parent
+366    with_ = cte.parent
+367    if len(with_.expressions) == 1:
+368        with_.pop()
+369    else:
+370        cte.pop()
+
+ + +
+
+ +
+ + def + merge_subqueries(expression, leave_tables_isolated=False): + + + +
+ +
10def merge_subqueries(expression, leave_tables_isolated=False):
+11    """
+12    Rewrite sqlglot AST to merge derived tables into the outer query.
+13
+14    This also merges CTEs if they are selected from only once.
+15
+16    Example:
+17        >>> import sqlglot
+18        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")
+19        >>> merge_subqueries(expression).sql()
+20        'SELECT x.a FROM x JOIN y'
+21
+22    If `leave_tables_isolated` is True, this will not merge inner queries into outer
+23    queries if it would result in multiple table selects in a single query:
+24        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")
+25        >>> merge_subqueries(expression, leave_tables_isolated=True).sql()
+26        'SELECT a FROM (SELECT x.a FROM x) JOIN y'
+27
+28    Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html
+29
+30    Args:
+31        expression (sqlglot.Expression): expression to optimize
+32        leave_tables_isolated (bool):
+33    Returns:
+34        sqlglot.Expression: optimized expression
+35    """
+36    expression = merge_ctes(expression, leave_tables_isolated)
+37    expression = merge_derived_tables(expression, leave_tables_isolated)
+38    return expression
+
+ + +

Rewrite sqlglot AST to merge derived tables into the outer query.

+ +

This also merges CTEs if they are selected from only once.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y")
+>>> merge_subqueries(expression).sql()
+'SELECT x.a FROM x JOIN y'
+
+
+
+ +

If leave_tables_isolated is True, this will not merge inner queries into outer +queries if it would result in multiple table selects in a single query:

+ +
+
+
+

expression = sqlglot.parse_one("SELECT a FROM (SELECT x.a FROM x) JOIN y") + merge_subqueries(expression, leave_tables_isolated=True).sql() + 'SELECT a FROM (SELECT x.a FROM x) JOIN y'

+
+
+
+ +

Inspired by https://dev.mysql.com/doc/refman/8.0/en/derived-table-optimization.html

+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
  • leave_tables_isolated (bool):
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ +
+ + def + merge_ctes(expression, leave_tables_isolated=False): + + + +
+ +
52def merge_ctes(expression, leave_tables_isolated=False):
+53    scopes = traverse_scope(expression)
+54
+55    # All places where we select from CTEs.
+56    # We key on the CTE scope so we can detect CTES that are selected from multiple times.
+57    cte_selections = defaultdict(list)
+58    for outer_scope in scopes:
+59        for table, inner_scope in outer_scope.selected_sources.values():
+60            if isinstance(inner_scope, Scope) and inner_scope.is_cte:
+61                cte_selections[id(inner_scope)].append(
+62                    (
+63                        outer_scope,
+64                        inner_scope,
+65                        table,
+66                    )
+67                )
+68
+69    singular_cte_selections = [v[0] for k, v in cte_selections.items() if len(v) == 1]
+70    for outer_scope, inner_scope, table in singular_cte_selections:
+71        from_or_join = table.find_ancestor(exp.From, exp.Join)
+72        if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
+73            alias = table.alias_or_name
+74            _rename_inner_sources(outer_scope, inner_scope, alias)
+75            _merge_from(outer_scope, inner_scope, table, alias)
+76            _merge_expressions(outer_scope, inner_scope, alias)
+77            _merge_joins(outer_scope, inner_scope, from_or_join)
+78            _merge_where(outer_scope, inner_scope, from_or_join)
+79            _merge_order(outer_scope, inner_scope)
+80            _merge_hints(outer_scope, inner_scope)
+81            _pop_cte(inner_scope)
+82            outer_scope.clear_cache()
+83    return expression
+
+ + + + +
+
+ +
+ + def + merge_derived_tables(expression, leave_tables_isolated=False): + + + +
+ +
 86def merge_derived_tables(expression, leave_tables_isolated=False):
+ 87    for outer_scope in traverse_scope(expression):
+ 88        for subquery in outer_scope.derived_tables:
+ 89            from_or_join = subquery.find_ancestor(exp.From, exp.Join)
+ 90            alias = subquery.alias_or_name
+ 91            inner_scope = outer_scope.sources[alias]
+ 92            if _mergeable(outer_scope, inner_scope, leave_tables_isolated, from_or_join):
+ 93                _rename_inner_sources(outer_scope, inner_scope, alias)
+ 94                _merge_from(outer_scope, inner_scope, subquery, alias)
+ 95                _merge_expressions(outer_scope, inner_scope, alias)
+ 96                _merge_joins(outer_scope, inner_scope, from_or_join)
+ 97                _merge_where(outer_scope, inner_scope, from_or_join)
+ 98                _merge_order(outer_scope, inner_scope)
+ 99                _merge_hints(outer_scope, inner_scope)
+100                outer_scope.clear_cache()
+101    return expression
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/normalize.html b/docs/sqlglot/optimizer/normalize.html new file mode 100644 index 0000000..a93d820 --- /dev/null +++ b/docs/sqlglot/optimizer/normalize.html @@ -0,0 +1,585 @@ + + + + + + + sqlglot.optimizer.normalize API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.normalize

+ + + + + + +
  1from sqlglot import exp
+  2from sqlglot.helper import while_changing
+  3from sqlglot.optimizer.simplify import flatten, simplify, uniq_sort
+  4
+  5
+  6def normalize(expression, dnf=False, max_distance=128):
+  7    """
+  8    Rewrite sqlglot AST into conjunctive normal form.
+  9
+ 10    Example:
+ 11        >>> import sqlglot
+ 12        >>> expression = sqlglot.parse_one("(x AND y) OR z")
+ 13        >>> normalize(expression).sql()
+ 14        '(x OR z) AND (y OR z)'
+ 15
+ 16    Args:
+ 17        expression (sqlglot.Expression): expression to normalize
+ 18        dnf (bool): rewrite in disjunctive normal form instead
+ 19        max_distance (int): the maximal estimated distance from cnf to attempt conversion
+ 20    Returns:
+ 21        sqlglot.Expression: normalized expression
+ 22    """
+ 23    expression = simplify(expression)
+ 24
+ 25    expression = while_changing(expression, lambda e: distributive_law(e, dnf, max_distance))
+ 26    return simplify(expression)
+ 27
+ 28
+ 29def normalized(expression, dnf=False):
+ 30    ancestor, root = (exp.And, exp.Or) if dnf else (exp.Or, exp.And)
+ 31
+ 32    return not any(connector.find_ancestor(ancestor) for connector in expression.find_all(root))
+ 33
+ 34
+ 35def normalization_distance(expression, dnf=False):
+ 36    """
+ 37    The difference in the number of predicates between the current expression and the normalized form.
+ 38
+ 39    This is used as an estimate of the cost of the conversion which is exponential in complexity.
+ 40
+ 41    Example:
+ 42        >>> import sqlglot
+ 43        >>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")
+ 44        >>> normalization_distance(expression)
+ 45        4
+ 46
+ 47    Args:
+ 48        expression (sqlglot.Expression): expression to compute distance
+ 49        dnf (bool): compute to dnf distance instead
+ 50    Returns:
+ 51        int: difference
+ 52    """
+ 53    return sum(_predicate_lengths(expression, dnf)) - (
+ 54        len(list(expression.find_all(exp.Connector))) + 1
+ 55    )
+ 56
+ 57
+ 58def _predicate_lengths(expression, dnf):
+ 59    """
+ 60    Returns a list of predicate lengths when expanded to normalized form.
+ 61
+ 62    (A AND B) OR C -> [2, 2] because len(A OR C), len(B OR C).
+ 63    """
+ 64    expression = expression.unnest()
+ 65
+ 66    if not isinstance(expression, exp.Connector):
+ 67        return [1]
+ 68
+ 69    left, right = expression.args.values()
+ 70
+ 71    if isinstance(expression, exp.And if dnf else exp.Or):
+ 72        return [
+ 73            a + b for a in _predicate_lengths(left, dnf) for b in _predicate_lengths(right, dnf)
+ 74        ]
+ 75    return _predicate_lengths(left, dnf) + _predicate_lengths(right, dnf)
+ 76
+ 77
+ 78def distributive_law(expression, dnf, max_distance):
+ 79    """
+ 80    x OR (y AND z) -> (x OR y) AND (x OR z)
+ 81    (x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)
+ 82    """
+ 83    if isinstance(expression.unnest(), exp.Connector):
+ 84        if normalization_distance(expression, dnf) > max_distance:
+ 85            return expression
+ 86
+ 87    to_exp, from_exp = (exp.Or, exp.And) if dnf else (exp.And, exp.Or)
+ 88
+ 89    exp.replace_children(expression, lambda e: distributive_law(e, dnf, max_distance))
+ 90
+ 91    if isinstance(expression, from_exp):
+ 92        a, b = expression.unnest_operands()
+ 93
+ 94        from_func = exp.and_ if from_exp == exp.And else exp.or_
+ 95        to_func = exp.and_ if to_exp == exp.And else exp.or_
+ 96
+ 97        if isinstance(a, to_exp) and isinstance(b, to_exp):
+ 98            if len(tuple(a.find_all(exp.Connector))) > len(tuple(b.find_all(exp.Connector))):
+ 99                return _distribute(a, b, from_func, to_func)
+100            return _distribute(b, a, from_func, to_func)
+101        if isinstance(a, to_exp):
+102            return _distribute(b, a, from_func, to_func)
+103        if isinstance(b, to_exp):
+104            return _distribute(a, b, from_func, to_func)
+105
+106    return expression
+107
+108
+109def _distribute(a, b, from_func, to_func):
+110    if isinstance(a, exp.Connector):
+111        exp.replace_children(
+112            a,
+113            lambda c: to_func(
+114                exp.paren(from_func(c, b.left)),
+115                exp.paren(from_func(c, b.right)),
+116            ),
+117        )
+118    else:
+119        a = to_func(from_func(a, b.left), from_func(a, b.right))
+120
+121    return _simplify(a)
+122
+123
+124def _simplify(node):
+125    node = uniq_sort(flatten(node))
+126    exp.replace_children(node, _simplify)
+127    return node
+
+ + +
+
+ +
+ + def + normalize(expression, dnf=False, max_distance=128): + + + +
+ +
 7def normalize(expression, dnf=False, max_distance=128):
+ 8    """
+ 9    Rewrite sqlglot AST into conjunctive normal form.
+10
+11    Example:
+12        >>> import sqlglot
+13        >>> expression = sqlglot.parse_one("(x AND y) OR z")
+14        >>> normalize(expression).sql()
+15        '(x OR z) AND (y OR z)'
+16
+17    Args:
+18        expression (sqlglot.Expression): expression to normalize
+19        dnf (bool): rewrite in disjunctive normal form instead
+20        max_distance (int): the maximal estimated distance from cnf to attempt conversion
+21    Returns:
+22        sqlglot.Expression: normalized expression
+23    """
+24    expression = simplify(expression)
+25
+26    expression = while_changing(expression, lambda e: distributive_law(e, dnf, max_distance))
+27    return simplify(expression)
+
+ + +

Rewrite sqlglot AST into conjunctive normal form.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("(x AND y) OR z")
+>>> normalize(expression).sql()
+'(x OR z) AND (y OR z)'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to normalize
  • +
  • dnf (bool): rewrite in disjunctive normal form instead
  • +
  • max_distance (int): the maximal estimated distance from cnf to attempt conversion
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: normalized expression

+
+
+ + +
+
+ +
+ + def + normalized(expression, dnf=False): + + + +
+ +
30def normalized(expression, dnf=False):
+31    ancestor, root = (exp.And, exp.Or) if dnf else (exp.Or, exp.And)
+32
+33    return not any(connector.find_ancestor(ancestor) for connector in expression.find_all(root))
+
+ + + + +
+
+ +
+ + def + normalization_distance(expression, dnf=False): + + + +
+ +
36def normalization_distance(expression, dnf=False):
+37    """
+38    The difference in the number of predicates between the current expression and the normalized form.
+39
+40    This is used as an estimate of the cost of the conversion which is exponential in complexity.
+41
+42    Example:
+43        >>> import sqlglot
+44        >>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")
+45        >>> normalization_distance(expression)
+46        4
+47
+48    Args:
+49        expression (sqlglot.Expression): expression to compute distance
+50        dnf (bool): compute to dnf distance instead
+51    Returns:
+52        int: difference
+53    """
+54    return sum(_predicate_lengths(expression, dnf)) - (
+55        len(list(expression.find_all(exp.Connector))) + 1
+56    )
+
+ + +

The difference in the number of predicates between the current expression and the normalized form.

+ +

This is used as an estimate of the cost of the conversion which is exponential in complexity.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("(a AND b) OR (c AND d)")
+>>> normalization_distance(expression)
+4
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to compute distance
  • +
  • dnf (bool): compute to dnf distance instead
  • +
+ +
Returns:
+ +
+

int: difference

+
+
+ + +
+
+ +
+ + def + distributive_law(expression, dnf, max_distance): + + + +
+ +
 79def distributive_law(expression, dnf, max_distance):
+ 80    """
+ 81    x OR (y AND z) -> (x OR y) AND (x OR z)
+ 82    (x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)
+ 83    """
+ 84    if isinstance(expression.unnest(), exp.Connector):
+ 85        if normalization_distance(expression, dnf) > max_distance:
+ 86            return expression
+ 87
+ 88    to_exp, from_exp = (exp.Or, exp.And) if dnf else (exp.And, exp.Or)
+ 89
+ 90    exp.replace_children(expression, lambda e: distributive_law(e, dnf, max_distance))
+ 91
+ 92    if isinstance(expression, from_exp):
+ 93        a, b = expression.unnest_operands()
+ 94
+ 95        from_func = exp.and_ if from_exp == exp.And else exp.or_
+ 96        to_func = exp.and_ if to_exp == exp.And else exp.or_
+ 97
+ 98        if isinstance(a, to_exp) and isinstance(b, to_exp):
+ 99            if len(tuple(a.find_all(exp.Connector))) > len(tuple(b.find_all(exp.Connector))):
+100                return _distribute(a, b, from_func, to_func)
+101            return _distribute(b, a, from_func, to_func)
+102        if isinstance(a, to_exp):
+103            return _distribute(b, a, from_func, to_func)
+104        if isinstance(b, to_exp):
+105            return _distribute(a, b, from_func, to_func)
+106
+107    return expression
+
+ + +

x OR (y AND z) -> (x OR y) AND (x OR z) +(x AND y) OR (y AND z) -> (x OR y) AND (x OR z) AND (y OR y) AND (y OR z)

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/optimize_joins.html b/docs/sqlglot/optimizer/optimize_joins.html new file mode 100644 index 0000000..66891c7 --- /dev/null +++ b/docs/sqlglot/optimizer/optimize_joins.html @@ -0,0 +1,489 @@ + + + + + + + sqlglot.optimizer.optimize_joins API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.optimize_joins

+ + + + + + +
 1from sqlglot import exp
+ 2from sqlglot.helper import tsort
+ 3from sqlglot.optimizer.simplify import simplify
+ 4
+ 5
+ 6def optimize_joins(expression):
+ 7    """
+ 8    Removes cross joins if possible and reorders joins based on predicate dependencies.
+ 9
+10    Example:
+11        >>> from sqlglot import parse_one
+12        >>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()
+13        'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'
+14    """
+15    for select in expression.find_all(exp.Select):
+16        references = {}
+17        cross_joins = []
+18
+19        for join in select.args.get("joins", []):
+20            name = join.this.alias_or_name
+21            tables = other_table_names(join, name)
+22
+23            if tables:
+24                for table in tables:
+25                    references[table] = references.get(table, []) + [join]
+26            else:
+27                cross_joins.append((name, join))
+28
+29        for name, join in cross_joins:
+30            for dep in references.get(name, []):
+31                on = dep.args["on"]
+32                on = on.replace(simplify(on))
+33
+34                if isinstance(on, exp.Connector):
+35                    for predicate in on.flatten():
+36                        if name in exp.column_table_names(predicate):
+37                            predicate.replace(exp.true())
+38                            join.on(predicate, copy=False)
+39
+40    expression = reorder_joins(expression)
+41    expression = normalize(expression)
+42    return expression
+43
+44
+45def reorder_joins(expression):
+46    """
+47    Reorder joins by topological sort order based on predicate references.
+48    """
+49    for from_ in expression.find_all(exp.From):
+50        head = from_.expressions[0]
+51        parent = from_.parent
+52        joins = {join.this.alias_or_name: join for join in parent.args.get("joins", [])}
+53        dag = {head.alias_or_name: []}
+54
+55        for name, join in joins.items():
+56            dag[name] = other_table_names(join, name)
+57
+58        parent.set(
+59            "joins",
+60            [joins[name] for name in tsort(dag) if name != head.alias_or_name],
+61        )
+62    return expression
+63
+64
+65def normalize(expression):
+66    """
+67    Remove INNER and OUTER from joins as they are optional.
+68    """
+69    for join in expression.find_all(exp.Join):
+70        if join.kind != "CROSS":
+71            join.set("kind", None)
+72    return expression
+73
+74
+75def other_table_names(join, exclude):
+76    return [
+77        name
+78        for name in (exp.column_table_names(join.args.get("on") or exp.true()))
+79        if name != exclude
+80    ]
+
+ + +
+
+ +
+ + def + optimize_joins(expression): + + + +
+ +
 7def optimize_joins(expression):
+ 8    """
+ 9    Removes cross joins if possible and reorders joins based on predicate dependencies.
+10
+11    Example:
+12        >>> from sqlglot import parse_one
+13        >>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()
+14        'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'
+15    """
+16    for select in expression.find_all(exp.Select):
+17        references = {}
+18        cross_joins = []
+19
+20        for join in select.args.get("joins", []):
+21            name = join.this.alias_or_name
+22            tables = other_table_names(join, name)
+23
+24            if tables:
+25                for table in tables:
+26                    references[table] = references.get(table, []) + [join]
+27            else:
+28                cross_joins.append((name, join))
+29
+30        for name, join in cross_joins:
+31            for dep in references.get(name, []):
+32                on = dep.args["on"]
+33                on = on.replace(simplify(on))
+34
+35                if isinstance(on, exp.Connector):
+36                    for predicate in on.flatten():
+37                        if name in exp.column_table_names(predicate):
+38                            predicate.replace(exp.true())
+39                            join.on(predicate, copy=False)
+40
+41    expression = reorder_joins(expression)
+42    expression = normalize(expression)
+43    return expression
+
+ + +

Removes cross joins if possible and reorders joins based on predicate dependencies.

+ +
Example:
+ +
+
+
>>> from sqlglot import parse_one
+>>> optimize_joins(parse_one("SELECT * FROM x CROSS JOIN y JOIN z ON x.a = z.a AND y.a = z.a")).sql()
+'SELECT * FROM x JOIN z ON x.a = z.a AND TRUE JOIN y ON y.a = z.a'
+
+
+
+
+ + +
+
+ +
+ + def + reorder_joins(expression): + + + +
+ +
46def reorder_joins(expression):
+47    """
+48    Reorder joins by topological sort order based on predicate references.
+49    """
+50    for from_ in expression.find_all(exp.From):
+51        head = from_.expressions[0]
+52        parent = from_.parent
+53        joins = {join.this.alias_or_name: join for join in parent.args.get("joins", [])}
+54        dag = {head.alias_or_name: []}
+55
+56        for name, join in joins.items():
+57            dag[name] = other_table_names(join, name)
+58
+59        parent.set(
+60            "joins",
+61            [joins[name] for name in tsort(dag) if name != head.alias_or_name],
+62        )
+63    return expression
+
+ + +

Reorder joins by topological sort order based on predicate references.

+
+ + +
+
+ +
+ + def + normalize(expression): + + + +
+ +
66def normalize(expression):
+67    """
+68    Remove INNER and OUTER from joins as they are optional.
+69    """
+70    for join in expression.find_all(exp.Join):
+71        if join.kind != "CROSS":
+72            join.set("kind", None)
+73    return expression
+
+ + +

Remove INNER and OUTER from joins as they are optional.

+
+ + +
+
+ +
+ + def + other_table_names(join, exclude): + + + +
+ +
76def other_table_names(join, exclude):
+77    return [
+78        name
+79        for name in (exp.column_table_names(join.args.get("on") or exp.true()))
+80        if name != exclude
+81    ]
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/optimizer.html b/docs/sqlglot/optimizer/optimizer.html new file mode 100644 index 0000000..ca82c2f --- /dev/null +++ b/docs/sqlglot/optimizer/optimizer.html @@ -0,0 +1,401 @@ + + + + + + + sqlglot.optimizer.optimizer API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.optimizer

+ + + + + + +
 1import sqlglot
+ 2from sqlglot.optimizer.annotate_types import annotate_types
+ 3from sqlglot.optimizer.canonicalize import canonicalize
+ 4from sqlglot.optimizer.eliminate_ctes import eliminate_ctes
+ 5from sqlglot.optimizer.eliminate_joins import eliminate_joins
+ 6from sqlglot.optimizer.eliminate_subqueries import eliminate_subqueries
+ 7from sqlglot.optimizer.expand_laterals import expand_laterals
+ 8from sqlglot.optimizer.expand_multi_table_selects import expand_multi_table_selects
+ 9from sqlglot.optimizer.isolate_table_selects import isolate_table_selects
+10from sqlglot.optimizer.lower_identities import lower_identities
+11from sqlglot.optimizer.merge_subqueries import merge_subqueries
+12from sqlglot.optimizer.normalize import normalize
+13from sqlglot.optimizer.optimize_joins import optimize_joins
+14from sqlglot.optimizer.pushdown_predicates import pushdown_predicates
+15from sqlglot.optimizer.pushdown_projections import pushdown_projections
+16from sqlglot.optimizer.qualify_columns import qualify_columns, validate_qualify_columns
+17from sqlglot.optimizer.qualify_tables import qualify_tables
+18from sqlglot.optimizer.unnest_subqueries import unnest_subqueries
+19from sqlglot.schema import ensure_schema
+20
+21RULES = (
+22    lower_identities,
+23    qualify_tables,
+24    isolate_table_selects,
+25    qualify_columns,
+26    expand_laterals,
+27    validate_qualify_columns,
+28    pushdown_projections,
+29    normalize,
+30    unnest_subqueries,
+31    expand_multi_table_selects,
+32    pushdown_predicates,
+33    optimize_joins,
+34    eliminate_subqueries,
+35    merge_subqueries,
+36    eliminate_joins,
+37    eliminate_ctes,
+38    annotate_types,
+39    canonicalize,
+40)
+41
+42
+43def optimize(expression, schema=None, db=None, catalog=None, rules=RULES, **kwargs):
+44    """
+45    Rewrite a sqlglot AST into an optimized form.
+46
+47    Args:
+48        expression (sqlglot.Expression): expression to optimize
+49        schema (dict|sqlglot.optimizer.Schema): database schema.
+50            This can either be an instance of `sqlglot.optimizer.Schema` or a mapping in one of
+51            the following forms:
+52                1. {table: {col: type}}
+53                2. {db: {table: {col: type}}}
+54                3. {catalog: {db: {table: {col: type}}}}
+55            If no schema is provided then the default schema defined at `sqlglot.schema` will be used
+56        db (str): specify the default database, as might be set by a `USE DATABASE db` statement
+57        catalog (str): specify the default catalog, as might be set by a `USE CATALOG c` statement
+58        rules (sequence): sequence of optimizer rules to use.
+59            Many of the rules require tables and columns to be qualified.
+60            Do not remove qualify_tables or qualify_columns from the sequence of rules unless you know
+61            what you're doing!
+62        **kwargs: If a rule has a keyword argument with the same name in **kwargs, it will be passed in.
+63    Returns:
+64        sqlglot.Expression: optimized expression
+65    """
+66    schema = ensure_schema(schema or sqlglot.schema)
+67    possible_kwargs = {"db": db, "catalog": catalog, "schema": schema, **kwargs}
+68    expression = expression.copy()
+69    for rule in rules:
+70        # Find any additional rule parameters, beyond `expression`
+71        rule_params = rule.__code__.co_varnames
+72        rule_kwargs = {
+73            param: possible_kwargs[param] for param in rule_params if param in possible_kwargs
+74        }
+75        expression = rule(expression, **rule_kwargs)
+76    return expression
+
+ + +
+
+ +
+ + def + optimize( expression, schema=None, db=None, catalog=None, rules=(<function lower_identities at 0x7ff75a9b2f80>, <function qualify_tables at 0x7ff75a9d9240>, <function isolate_table_selects at 0x7ff75a9b2e60>, <function qualify_columns at 0x7ff75a9d8820>, <function expand_laterals at 0x7ff75a9b2b90>, <function validate_qualify_columns at 0x7ff75a9d8c10>, <function pushdown_projections at 0x7ff75a9d85e0>, <function normalize at 0x7ff75a9b0820>, <function unnest_subqueries at 0x7ff75a9d9900>, <function expand_multi_table_selects at 0x7ff75a9b2dd0>, <function pushdown_predicates at 0x7ff75a9d81f0>, <function optimize_joins at 0x7ff75a9b3d00>, <function eliminate_subqueries at 0x7ff75a9b2830>, <function merge_subqueries at 0x7ff75a9b35b0>, <function eliminate_joins at 0x7ff75a9b0700>, <function eliminate_ctes at 0x7ff75a9b05e0>, <function annotate_types at 0x7ff75a989480>, <function canonicalize at 0x7ff75a9b0160>), **kwargs): + + + +
+ +
44def optimize(expression, schema=None, db=None, catalog=None, rules=RULES, **kwargs):
+45    """
+46    Rewrite a sqlglot AST into an optimized form.
+47
+48    Args:
+49        expression (sqlglot.Expression): expression to optimize
+50        schema (dict|sqlglot.optimizer.Schema): database schema.
+51            This can either be an instance of `sqlglot.optimizer.Schema` or a mapping in one of
+52            the following forms:
+53                1. {table: {col: type}}
+54                2. {db: {table: {col: type}}}
+55                3. {catalog: {db: {table: {col: type}}}}
+56            If no schema is provided then the default schema defined at `sqlglot.schema` will be used
+57        db (str): specify the default database, as might be set by a `USE DATABASE db` statement
+58        catalog (str): specify the default catalog, as might be set by a `USE CATALOG c` statement
+59        rules (sequence): sequence of optimizer rules to use.
+60            Many of the rules require tables and columns to be qualified.
+61            Do not remove qualify_tables or qualify_columns from the sequence of rules unless you know
+62            what you're doing!
+63        **kwargs: If a rule has a keyword argument with the same name in **kwargs, it will be passed in.
+64    Returns:
+65        sqlglot.Expression: optimized expression
+66    """
+67    schema = ensure_schema(schema or sqlglot.schema)
+68    possible_kwargs = {"db": db, "catalog": catalog, "schema": schema, **kwargs}
+69    expression = expression.copy()
+70    for rule in rules:
+71        # Find any additional rule parameters, beyond `expression`
+72        rule_params = rule.__code__.co_varnames
+73        rule_kwargs = {
+74            param: possible_kwargs[param] for param in rule_params if param in possible_kwargs
+75        }
+76        expression = rule(expression, **rule_kwargs)
+77    return expression
+
+ + +

Rewrite a sqlglot AST into an optimized form.

+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
  • schema (dict|sqlglot.optimizer.Schema): database schema. +This can either be an instance of sqlglot.optimizer.Schema or a mapping in one of +the following forms: + 1. {table: {col: type}} + 2. {db: {table: {col: type}}} + 3. {catalog: {db: {table: {col: type}}}} +If no schema is provided then the default schema defined at sqlglot.schema will be used
  • +
  • db (str): specify the default database, as might be set by a USE DATABASE db statement
  • +
  • catalog (str): specify the default catalog, as might be set by a USE CATALOG c statement
  • +
  • rules (sequence): sequence of optimizer rules to use. +Many of the rules require tables and columns to be qualified. +Do not remove qualify_tables or qualify_columns from the sequence of rules unless you know +what you're doing!
  • +
  • *kwargs: If a rule has a keyword argument with the same name in *kwargs, it will be passed in.
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/pushdown_predicates.html b/docs/sqlglot/optimizer/pushdown_predicates.html new file mode 100644 index 0000000..e4e1add --- /dev/null +++ b/docs/sqlglot/optimizer/pushdown_predicates.html @@ -0,0 +1,773 @@ + + + + + + + sqlglot.optimizer.pushdown_predicates API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.pushdown_predicates

+ + + + + + +
  1from sqlglot import exp
+  2from sqlglot.optimizer.normalize import normalized
+  3from sqlglot.optimizer.scope import build_scope
+  4from sqlglot.optimizer.simplify import simplify
+  5
+  6
+  7def pushdown_predicates(expression):
+  8    """
+  9    Rewrite sqlglot AST to pushdown predicates in FROMS and JOINS
+ 10
+ 11    Example:
+ 12        >>> import sqlglot
+ 13        >>> sql = "SELECT * FROM (SELECT * FROM x AS x) AS y WHERE y.a = 1"
+ 14        >>> expression = sqlglot.parse_one(sql)
+ 15        >>> pushdown_predicates(expression).sql()
+ 16        'SELECT * FROM (SELECT * FROM x AS x WHERE y.a = 1) AS y WHERE TRUE'
+ 17
+ 18    Args:
+ 19        expression (sqlglot.Expression): expression to optimize
+ 20    Returns:
+ 21        sqlglot.Expression: optimized expression
+ 22    """
+ 23    root = build_scope(expression)
+ 24    scope_ref_count = root.ref_count()
+ 25
+ 26    for scope in reversed(list(root.traverse())):
+ 27        select = scope.expression
+ 28        where = select.args.get("where")
+ 29        if where:
+ 30            selected_sources = scope.selected_sources
+ 31            # a right join can only push down to itself and not the source FROM table
+ 32            for k, (node, source) in selected_sources.items():
+ 33                parent = node.find_ancestor(exp.Join, exp.From)
+ 34                if isinstance(parent, exp.Join) and parent.side == "RIGHT":
+ 35                    selected_sources = {k: (node, source)}
+ 36                    break
+ 37            pushdown(where.this, selected_sources, scope_ref_count)
+ 38
+ 39        # joins should only pushdown into itself, not to other joins
+ 40        # so we limit the selected sources to only itself
+ 41        for join in select.args.get("joins") or []:
+ 42            name = join.this.alias_or_name
+ 43            pushdown(join.args.get("on"), {name: scope.selected_sources[name]}, scope_ref_count)
+ 44
+ 45    return expression
+ 46
+ 47
+ 48def pushdown(condition, sources, scope_ref_count):
+ 49    if not condition:
+ 50        return
+ 51
+ 52    condition = condition.replace(simplify(condition))
+ 53    cnf_like = normalized(condition) or not normalized(condition, dnf=True)
+ 54
+ 55    predicates = list(
+ 56        condition.flatten()
+ 57        if isinstance(condition, exp.And if cnf_like else exp.Or)
+ 58        else [condition]
+ 59    )
+ 60
+ 61    if cnf_like:
+ 62        pushdown_cnf(predicates, sources, scope_ref_count)
+ 63    else:
+ 64        pushdown_dnf(predicates, sources, scope_ref_count)
+ 65
+ 66
+ 67def pushdown_cnf(predicates, scope, scope_ref_count):
+ 68    """
+ 69    If the predicates are in CNF like form, we can simply replace each block in the parent.
+ 70    """
+ 71    for predicate in predicates:
+ 72        for node in nodes_for_predicate(predicate, scope, scope_ref_count).values():
+ 73            if isinstance(node, exp.Join):
+ 74                predicate.replace(exp.true())
+ 75                node.on(predicate, copy=False)
+ 76                break
+ 77            if isinstance(node, exp.Select):
+ 78                predicate.replace(exp.true())
+ 79                node.where(replace_aliases(node, predicate), copy=False)
+ 80
+ 81
+ 82def pushdown_dnf(predicates, scope, scope_ref_count):
+ 83    """
+ 84    If the predicates are in DNF form, we can only push down conditions that are in all blocks.
+ 85    Additionally, we can't remove predicates from their original form.
+ 86    """
+ 87    # find all the tables that can be pushed down to
+ 88    # these are tables that are referenced in all blocks of a DNF
+ 89    # (a.x AND b.x) OR (a.y AND c.y)
+ 90    # only table a can be pushed down
+ 91    pushdown_tables = set()
+ 92
+ 93    for a in predicates:
+ 94        a_tables = set(exp.column_table_names(a))
+ 95
+ 96        for b in predicates:
+ 97            a_tables &= set(exp.column_table_names(b))
+ 98
+ 99        pushdown_tables.update(a_tables)
+100
+101    conditions = {}
+102
+103    # for every pushdown table, find all related conditions in all predicates
+104    # combine them with ORs
+105    # (a.x AND a.y AND b.x) OR (a.z AND c.y) -> (a.x AND a.y) OR (a.z)
+106    for table in sorted(pushdown_tables):
+107        for predicate in predicates:
+108            nodes = nodes_for_predicate(predicate, scope, scope_ref_count)
+109
+110            if table not in nodes:
+111                continue
+112
+113            predicate_condition = None
+114
+115            for column in predicate.find_all(exp.Column):
+116                if column.table == table:
+117                    condition = column.find_ancestor(exp.Condition)
+118                    predicate_condition = (
+119                        exp.and_(predicate_condition, condition)
+120                        if predicate_condition
+121                        else condition
+122                    )
+123
+124            if predicate_condition:
+125                conditions[table] = (
+126                    exp.or_(conditions[table], predicate_condition)
+127                    if table in conditions
+128                    else predicate_condition
+129                )
+130
+131        for name, node in nodes.items():
+132            if name not in conditions:
+133                continue
+134
+135            predicate = conditions[name]
+136
+137            if isinstance(node, exp.Join):
+138                node.on(predicate, copy=False)
+139            elif isinstance(node, exp.Select):
+140                node.where(replace_aliases(node, predicate), copy=False)
+141
+142
+143def nodes_for_predicate(predicate, sources, scope_ref_count):
+144    nodes = {}
+145    tables = exp.column_table_names(predicate)
+146    where_condition = isinstance(predicate.find_ancestor(exp.Join, exp.Where), exp.Where)
+147
+148    for table in tables:
+149        node, source = sources.get(table) or (None, None)
+150
+151        # if the predicate is in a where statement we can try to push it down
+152        # we want to find the root join or from statement
+153        if node and where_condition:
+154            node = node.find_ancestor(exp.Join, exp.From)
+155
+156        # a node can reference a CTE which should be pushed down
+157        if isinstance(node, exp.From) and not isinstance(source, exp.Table):
+158            with_ = source.parent.expression.args.get("with")
+159            if with_ and with_.recursive:
+160                return {}
+161            node = source.expression
+162
+163        if isinstance(node, exp.Join):
+164            if node.side and node.side != "RIGHT":
+165                return {}
+166            nodes[table] = node
+167        elif isinstance(node, exp.Select) and len(tables) == 1:
+168            # We can't push down window expressions
+169            has_window_expression = any(
+170                select for select in node.selects if select.find(exp.Window)
+171            )
+172            # we can't push down predicates to select statements if they are referenced in
+173            # multiple places.
+174            if (
+175                not node.args.get("group")
+176                and scope_ref_count[id(source)] < 2
+177                and not has_window_expression
+178            ):
+179                nodes[table] = node
+180    return nodes
+181
+182
+183def replace_aliases(source, predicate):
+184    aliases = {}
+185
+186    for select in source.selects:
+187        if isinstance(select, exp.Alias):
+188            aliases[select.alias] = select.this
+189        else:
+190            aliases[select.name] = select
+191
+192    def _replace_alias(column):
+193        if isinstance(column, exp.Column) and column.name in aliases:
+194            return aliases[column.name].copy()
+195        return column
+196
+197    return predicate.transform(_replace_alias)
+
+ + +
+
+ +
+ + def + pushdown_predicates(expression): + + + +
+ +
 8def pushdown_predicates(expression):
+ 9    """
+10    Rewrite sqlglot AST to pushdown predicates in FROMS and JOINS
+11
+12    Example:
+13        >>> import sqlglot
+14        >>> sql = "SELECT * FROM (SELECT * FROM x AS x) AS y WHERE y.a = 1"
+15        >>> expression = sqlglot.parse_one(sql)
+16        >>> pushdown_predicates(expression).sql()
+17        'SELECT * FROM (SELECT * FROM x AS x WHERE y.a = 1) AS y WHERE TRUE'
+18
+19    Args:
+20        expression (sqlglot.Expression): expression to optimize
+21    Returns:
+22        sqlglot.Expression: optimized expression
+23    """
+24    root = build_scope(expression)
+25    scope_ref_count = root.ref_count()
+26
+27    for scope in reversed(list(root.traverse())):
+28        select = scope.expression
+29        where = select.args.get("where")
+30        if where:
+31            selected_sources = scope.selected_sources
+32            # a right join can only push down to itself and not the source FROM table
+33            for k, (node, source) in selected_sources.items():
+34                parent = node.find_ancestor(exp.Join, exp.From)
+35                if isinstance(parent, exp.Join) and parent.side == "RIGHT":
+36                    selected_sources = {k: (node, source)}
+37                    break
+38            pushdown(where.this, selected_sources, scope_ref_count)
+39
+40        # joins should only pushdown into itself, not to other joins
+41        # so we limit the selected sources to only itself
+42        for join in select.args.get("joins") or []:
+43            name = join.this.alias_or_name
+44            pushdown(join.args.get("on"), {name: scope.selected_sources[name]}, scope_ref_count)
+45
+46    return expression
+
+ + +

Rewrite sqlglot AST to pushdown predicates in FROMS and JOINS

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sql = "SELECT * FROM (SELECT * FROM x AS x) AS y WHERE y.a = 1"
+>>> expression = sqlglot.parse_one(sql)
+>>> pushdown_predicates(expression).sql()
+'SELECT * FROM (SELECT * FROM x AS x WHERE y.a = 1) AS y WHERE TRUE'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ +
+ + def + pushdown(condition, sources, scope_ref_count): + + + +
+ +
49def pushdown(condition, sources, scope_ref_count):
+50    if not condition:
+51        return
+52
+53    condition = condition.replace(simplify(condition))
+54    cnf_like = normalized(condition) or not normalized(condition, dnf=True)
+55
+56    predicates = list(
+57        condition.flatten()
+58        if isinstance(condition, exp.And if cnf_like else exp.Or)
+59        else [condition]
+60    )
+61
+62    if cnf_like:
+63        pushdown_cnf(predicates, sources, scope_ref_count)
+64    else:
+65        pushdown_dnf(predicates, sources, scope_ref_count)
+
+ + + + +
+
+ +
+ + def + pushdown_cnf(predicates, scope, scope_ref_count): + + + +
+ +
68def pushdown_cnf(predicates, scope, scope_ref_count):
+69    """
+70    If the predicates are in CNF like form, we can simply replace each block in the parent.
+71    """
+72    for predicate in predicates:
+73        for node in nodes_for_predicate(predicate, scope, scope_ref_count).values():
+74            if isinstance(node, exp.Join):
+75                predicate.replace(exp.true())
+76                node.on(predicate, copy=False)
+77                break
+78            if isinstance(node, exp.Select):
+79                predicate.replace(exp.true())
+80                node.where(replace_aliases(node, predicate), copy=False)
+
+ + +

If the predicates are in CNF like form, we can simply replace each block in the parent.

+
+ + +
+
+ +
+ + def + pushdown_dnf(predicates, scope, scope_ref_count): + + + +
+ +
 83def pushdown_dnf(predicates, scope, scope_ref_count):
+ 84    """
+ 85    If the predicates are in DNF form, we can only push down conditions that are in all blocks.
+ 86    Additionally, we can't remove predicates from their original form.
+ 87    """
+ 88    # find all the tables that can be pushed down to
+ 89    # these are tables that are referenced in all blocks of a DNF
+ 90    # (a.x AND b.x) OR (a.y AND c.y)
+ 91    # only table a can be pushed down
+ 92    pushdown_tables = set()
+ 93
+ 94    for a in predicates:
+ 95        a_tables = set(exp.column_table_names(a))
+ 96
+ 97        for b in predicates:
+ 98            a_tables &= set(exp.column_table_names(b))
+ 99
+100        pushdown_tables.update(a_tables)
+101
+102    conditions = {}
+103
+104    # for every pushdown table, find all related conditions in all predicates
+105    # combine them with ORs
+106    # (a.x AND a.y AND b.x) OR (a.z AND c.y) -> (a.x AND a.y) OR (a.z)
+107    for table in sorted(pushdown_tables):
+108        for predicate in predicates:
+109            nodes = nodes_for_predicate(predicate, scope, scope_ref_count)
+110
+111            if table not in nodes:
+112                continue
+113
+114            predicate_condition = None
+115
+116            for column in predicate.find_all(exp.Column):
+117                if column.table == table:
+118                    condition = column.find_ancestor(exp.Condition)
+119                    predicate_condition = (
+120                        exp.and_(predicate_condition, condition)
+121                        if predicate_condition
+122                        else condition
+123                    )
+124
+125            if predicate_condition:
+126                conditions[table] = (
+127                    exp.or_(conditions[table], predicate_condition)
+128                    if table in conditions
+129                    else predicate_condition
+130                )
+131
+132        for name, node in nodes.items():
+133            if name not in conditions:
+134                continue
+135
+136            predicate = conditions[name]
+137
+138            if isinstance(node, exp.Join):
+139                node.on(predicate, copy=False)
+140            elif isinstance(node, exp.Select):
+141                node.where(replace_aliases(node, predicate), copy=False)
+
+ + +

If the predicates are in DNF form, we can only push down conditions that are in all blocks. +Additionally, we can't remove predicates from their original form.

+
+ + +
+
+ +
+ + def + nodes_for_predicate(predicate, sources, scope_ref_count): + + + +
+ +
144def nodes_for_predicate(predicate, sources, scope_ref_count):
+145    nodes = {}
+146    tables = exp.column_table_names(predicate)
+147    where_condition = isinstance(predicate.find_ancestor(exp.Join, exp.Where), exp.Where)
+148
+149    for table in tables:
+150        node, source = sources.get(table) or (None, None)
+151
+152        # if the predicate is in a where statement we can try to push it down
+153        # we want to find the root join or from statement
+154        if node and where_condition:
+155            node = node.find_ancestor(exp.Join, exp.From)
+156
+157        # a node can reference a CTE which should be pushed down
+158        if isinstance(node, exp.From) and not isinstance(source, exp.Table):
+159            with_ = source.parent.expression.args.get("with")
+160            if with_ and with_.recursive:
+161                return {}
+162            node = source.expression
+163
+164        if isinstance(node, exp.Join):
+165            if node.side and node.side != "RIGHT":
+166                return {}
+167            nodes[table] = node
+168        elif isinstance(node, exp.Select) and len(tables) == 1:
+169            # We can't push down window expressions
+170            has_window_expression = any(
+171                select for select in node.selects if select.find(exp.Window)
+172            )
+173            # we can't push down predicates to select statements if they are referenced in
+174            # multiple places.
+175            if (
+176                not node.args.get("group")
+177                and scope_ref_count[id(source)] < 2
+178                and not has_window_expression
+179            ):
+180                nodes[table] = node
+181    return nodes
+
+ + + + +
+
+ +
+ + def + replace_aliases(source, predicate): + + + +
+ +
184def replace_aliases(source, predicate):
+185    aliases = {}
+186
+187    for select in source.selects:
+188        if isinstance(select, exp.Alias):
+189            aliases[select.alias] = select.this
+190        else:
+191            aliases[select.name] = select
+192
+193    def _replace_alias(column):
+194        if isinstance(column, exp.Column) and column.name in aliases:
+195            return aliases[column.name].copy()
+196        return column
+197
+198    return predicate.transform(_replace_alias)
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/pushdown_projections.html b/docs/sqlglot/optimizer/pushdown_projections.html new file mode 100644 index 0000000..385a18a --- /dev/null +++ b/docs/sqlglot/optimizer/pushdown_projections.html @@ -0,0 +1,477 @@ + + + + + + + sqlglot.optimizer.pushdown_projections API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.pushdown_projections

+ + + + + + +
  1from collections import defaultdict
+  2
+  3from sqlglot import alias, exp
+  4from sqlglot.optimizer.scope import Scope, traverse_scope
+  5
+  6# Sentinel value that means an outer query selecting ALL columns
+  7SELECT_ALL = object()
+  8
+  9# Selection to use if selection list is empty
+ 10DEFAULT_SELECTION = lambda: alias("1", "_")
+ 11
+ 12
+ 13def pushdown_projections(expression):
+ 14    """
+ 15    Rewrite sqlglot AST to remove unused columns projections.
+ 16
+ 17    Example:
+ 18        >>> import sqlglot
+ 19        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
+ 20        >>> expression = sqlglot.parse_one(sql)
+ 21        >>> pushdown_projections(expression).sql()
+ 22        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'
+ 23
+ 24    Args:
+ 25        expression (sqlglot.Expression): expression to optimize
+ 26    Returns:
+ 27        sqlglot.Expression: optimized expression
+ 28    """
+ 29    # Map of Scope to all columns being selected by outer queries.
+ 30    referenced_columns = defaultdict(set)
+ 31    left_union = None
+ 32    right_union = None
+ 33    # We build the scope tree (which is traversed in DFS postorder), then iterate
+ 34    # over the result in reverse order. This should ensure that the set of selected
+ 35    # columns for a particular scope are completely build by the time we get to it.
+ 36    for scope in reversed(traverse_scope(expression)):
+ 37        parent_selections = referenced_columns.get(scope, {SELECT_ALL})
+ 38
+ 39        if scope.expression.args.get("distinct"):
+ 40            # We can't remove columns SELECT DISTINCT nor UNION DISTINCT
+ 41            parent_selections = {SELECT_ALL}
+ 42
+ 43        if isinstance(scope.expression, exp.Union):
+ 44            left_union, right_union = scope.union_scopes
+ 45            referenced_columns[left_union] = parent_selections
+ 46            referenced_columns[right_union] = parent_selections
+ 47
+ 48        if isinstance(scope.expression, exp.Select) and scope != right_union:
+ 49            removed_indexes = _remove_unused_selections(scope, parent_selections)
+ 50            # The left union is used for column names to select and if we remove columns from the left
+ 51            # we need to also remove those same columns in the right that were at the same position
+ 52            if scope is left_union:
+ 53                _remove_indexed_selections(right_union, removed_indexes)
+ 54
+ 55            # Group columns by source name
+ 56            selects = defaultdict(set)
+ 57            for col in scope.columns:
+ 58                table_name = col.table
+ 59                col_name = col.name
+ 60                selects[table_name].add(col_name)
+ 61
+ 62            # Push the selected columns down to the next scope
+ 63            for name, (_, source) in scope.selected_sources.items():
+ 64                if isinstance(source, Scope):
+ 65                    columns = selects.get(name) or set()
+ 66                    referenced_columns[source].update(columns)
+ 67
+ 68    return expression
+ 69
+ 70
+ 71def _remove_unused_selections(scope, parent_selections):
+ 72    removed_indexes = []
+ 73    order = scope.expression.args.get("order")
+ 74
+ 75    if order:
+ 76        # Assume columns without a qualified table are references to output columns
+ 77        order_refs = {c.name for c in order.find_all(exp.Column) if not c.table}
+ 78    else:
+ 79        order_refs = set()
+ 80
+ 81    new_selections = []
+ 82    removed = False
+ 83    for i, selection in enumerate(scope.selects):
+ 84        if (
+ 85            SELECT_ALL in parent_selections
+ 86            or selection.alias_or_name in parent_selections
+ 87            or selection.alias_or_name in order_refs
+ 88        ):
+ 89            new_selections.append(selection)
+ 90        else:
+ 91            removed_indexes.append(i)
+ 92            removed = True
+ 93
+ 94    # If there are no remaining selections, just select a single constant
+ 95    if not new_selections:
+ 96        new_selections.append(DEFAULT_SELECTION())
+ 97
+ 98    scope.expression.set("expressions", new_selections)
+ 99    if removed:
+100        scope.clear_cache()
+101    return removed_indexes
+102
+103
+104def _remove_indexed_selections(scope, indexes_to_remove):
+105    new_selections = [
+106        selection for i, selection in enumerate(scope.selects) if i not in indexes_to_remove
+107    ]
+108    if not new_selections:
+109        new_selections.append(DEFAULT_SELECTION())
+110    scope.expression.set("expressions", new_selections)
+
+ + +
+
+ +
+ + def + DEFAULT_SELECTION(): + + + +
+ +
11DEFAULT_SELECTION = lambda: alias("1", "_")
+
+ + + + +
+
+ +
+ + def + pushdown_projections(expression): + + + +
+ +
14def pushdown_projections(expression):
+15    """
+16    Rewrite sqlglot AST to remove unused columns projections.
+17
+18    Example:
+19        >>> import sqlglot
+20        >>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
+21        >>> expression = sqlglot.parse_one(sql)
+22        >>> pushdown_projections(expression).sql()
+23        'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'
+24
+25    Args:
+26        expression (sqlglot.Expression): expression to optimize
+27    Returns:
+28        sqlglot.Expression: optimized expression
+29    """
+30    # Map of Scope to all columns being selected by outer queries.
+31    referenced_columns = defaultdict(set)
+32    left_union = None
+33    right_union = None
+34    # We build the scope tree (which is traversed in DFS postorder), then iterate
+35    # over the result in reverse order. This should ensure that the set of selected
+36    # columns for a particular scope are completely build by the time we get to it.
+37    for scope in reversed(traverse_scope(expression)):
+38        parent_selections = referenced_columns.get(scope, {SELECT_ALL})
+39
+40        if scope.expression.args.get("distinct"):
+41            # We can't remove columns SELECT DISTINCT nor UNION DISTINCT
+42            parent_selections = {SELECT_ALL}
+43
+44        if isinstance(scope.expression, exp.Union):
+45            left_union, right_union = scope.union_scopes
+46            referenced_columns[left_union] = parent_selections
+47            referenced_columns[right_union] = parent_selections
+48
+49        if isinstance(scope.expression, exp.Select) and scope != right_union:
+50            removed_indexes = _remove_unused_selections(scope, parent_selections)
+51            # The left union is used for column names to select and if we remove columns from the left
+52            # we need to also remove those same columns in the right that were at the same position
+53            if scope is left_union:
+54                _remove_indexed_selections(right_union, removed_indexes)
+55
+56            # Group columns by source name
+57            selects = defaultdict(set)
+58            for col in scope.columns:
+59                table_name = col.table
+60                col_name = col.name
+61                selects[table_name].add(col_name)
+62
+63            # Push the selected columns down to the next scope
+64            for name, (_, source) in scope.selected_sources.items():
+65                if isinstance(source, Scope):
+66                    columns = selects.get(name) or set()
+67                    referenced_columns[source].update(columns)
+68
+69    return expression
+
+ + +

Rewrite sqlglot AST to remove unused columns projections.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sql = "SELECT y.a AS a FROM (SELECT x.a AS a, x.b AS b FROM x) AS y"
+>>> expression = sqlglot.parse_one(sql)
+>>> pushdown_projections(expression).sql()
+'SELECT y.a AS a FROM (SELECT x.a AS a FROM x) AS y'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to optimize
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: optimized expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/qualify_columns.html b/docs/sqlglot/optimizer/qualify_columns.html new file mode 100644 index 0000000..9722c27 --- /dev/null +++ b/docs/sqlglot/optimizer/qualify_columns.html @@ -0,0 +1,804 @@ + + + + + + + sqlglot.optimizer.qualify_columns API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.qualify_columns

+ + + + + + +
  1import itertools
+  2import typing as t
+  3
+  4from sqlglot import alias, exp
+  5from sqlglot.errors import OptimizeError
+  6from sqlglot.optimizer.scope import Scope, traverse_scope
+  7from sqlglot.schema import ensure_schema
+  8
+  9
+ 10def qualify_columns(expression, schema):
+ 11    """
+ 12    Rewrite sqlglot AST to have fully qualified columns.
+ 13
+ 14    Example:
+ 15        >>> import sqlglot
+ 16        >>> schema = {"tbl": {"col": "INT"}}
+ 17        >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
+ 18        >>> qualify_columns(expression, schema).sql()
+ 19        'SELECT tbl.col AS col FROM tbl'
+ 20
+ 21    Args:
+ 22        expression (sqlglot.Expression): expression to qualify
+ 23        schema (dict|sqlglot.optimizer.Schema): Database schema
+ 24    Returns:
+ 25        sqlglot.Expression: qualified expression
+ 26    """
+ 27    schema = ensure_schema(schema)
+ 28
+ 29    for scope in traverse_scope(expression):
+ 30        resolver = _Resolver(scope, schema)
+ 31        _pop_table_column_aliases(scope.ctes)
+ 32        _pop_table_column_aliases(scope.derived_tables)
+ 33        _expand_using(scope, resolver)
+ 34        _expand_group_by(scope, resolver)
+ 35        _qualify_columns(scope, resolver)
+ 36        _expand_order_by(scope)
+ 37        if not isinstance(scope.expression, exp.UDTF):
+ 38            _expand_stars(scope, resolver)
+ 39            _qualify_outputs(scope)
+ 40
+ 41    return expression
+ 42
+ 43
+ 44def validate_qualify_columns(expression):
+ 45    """Raise an `OptimizeError` if any columns aren't qualified"""
+ 46    unqualified_columns = []
+ 47    for scope in traverse_scope(expression):
+ 48        if isinstance(scope.expression, exp.Select):
+ 49            unqualified_columns.extend(scope.unqualified_columns)
+ 50            if scope.external_columns and not scope.is_correlated_subquery:
+ 51                raise OptimizeError(f"Unknown table: {scope.external_columns[0].table}")
+ 52
+ 53    if unqualified_columns:
+ 54        raise OptimizeError(f"Ambiguous columns: {unqualified_columns}")
+ 55    return expression
+ 56
+ 57
+ 58def _pop_table_column_aliases(derived_tables):
+ 59    """
+ 60    Remove table column aliases.
+ 61
+ 62    (e.g. SELECT ... FROM (SELECT ...) AS foo(col1, col2)
+ 63    """
+ 64    for derived_table in derived_tables:
+ 65        if isinstance(derived_table.unnest(), exp.UDTF):
+ 66            continue
+ 67        table_alias = derived_table.args.get("alias")
+ 68        if table_alias:
+ 69            table_alias.args.pop("columns", None)
+ 70
+ 71
+ 72def _expand_using(scope, resolver):
+ 73    joins = list(scope.expression.find_all(exp.Join))
+ 74    names = {join.this.alias for join in joins}
+ 75    ordered = [key for key in scope.selected_sources if key not in names]
+ 76
+ 77    # Mapping of automatically joined column names to source names
+ 78    column_tables = {}
+ 79
+ 80    for join in joins:
+ 81        using = join.args.get("using")
+ 82
+ 83        if not using:
+ 84            continue
+ 85
+ 86        join_table = join.this.alias_or_name
+ 87
+ 88        columns = {}
+ 89
+ 90        for k in scope.selected_sources:
+ 91            if k in ordered:
+ 92                for column in resolver.get_source_columns(k):
+ 93                    if column not in columns:
+ 94                        columns[column] = k
+ 95
+ 96        ordered.append(join_table)
+ 97        join_columns = resolver.get_source_columns(join_table)
+ 98        conditions = []
+ 99
+100        for identifier in using:
+101            identifier = identifier.name
+102            table = columns.get(identifier)
+103
+104            if not table or identifier not in join_columns:
+105                raise OptimizeError(f"Cannot automatically join: {identifier}")
+106
+107            conditions.append(
+108                exp.condition(
+109                    exp.EQ(
+110                        this=exp.column(identifier, table=table),
+111                        expression=exp.column(identifier, table=join_table),
+112                    )
+113                )
+114            )
+115
+116            tables = column_tables.setdefault(identifier, [])
+117            if table not in tables:
+118                tables.append(table)
+119            if join_table not in tables:
+120                tables.append(join_table)
+121
+122        join.args.pop("using")
+123        join.set("on", exp.and_(*conditions))
+124
+125    if column_tables:
+126        for column in scope.columns:
+127            if not column.table and column.name in column_tables:
+128                tables = column_tables[column.name]
+129                coalesce = [exp.column(column.name, table=table) for table in tables]
+130                replacement = exp.Coalesce(this=coalesce[0], expressions=coalesce[1:])
+131
+132                # Ensure selects keep their output name
+133                if isinstance(column.parent, exp.Select):
+134                    replacement = exp.alias_(replacement, alias=column.name)
+135
+136                scope.replace(column, replacement)
+137
+138
+139def _expand_group_by(scope, resolver):
+140    group = scope.expression.args.get("group")
+141    if not group:
+142        return
+143
+144    # Replace references to select aliases
+145    def transform(node, *_):
+146        if isinstance(node, exp.Column) and not node.table:
+147            table = resolver.get_table(node.name)
+148
+149            # Source columns get priority over select aliases
+150            if table:
+151                node.set("table", exp.to_identifier(table))
+152                return node
+153
+154            selects = {s.alias_or_name: s for s in scope.selects}
+155
+156            select = selects.get(node.name)
+157            if select:
+158                scope.clear_cache()
+159                if isinstance(select, exp.Alias):
+160                    select = select.this
+161                return select.copy()
+162
+163        return node
+164
+165    group.transform(transform, copy=False)
+166    group.set("expressions", _expand_positional_references(scope, group.expressions))
+167    scope.expression.set("group", group)
+168
+169
+170def _expand_order_by(scope):
+171    order = scope.expression.args.get("order")
+172    if not order:
+173        return
+174
+175    ordereds = order.expressions
+176    for ordered, new_expression in zip(
+177        ordereds,
+178        _expand_positional_references(scope, (o.this for o in ordereds)),
+179    ):
+180        ordered.set("this", new_expression)
+181
+182
+183def _expand_positional_references(scope, expressions):
+184    new_nodes = []
+185    for node in expressions:
+186        if node.is_int:
+187            try:
+188                select = scope.selects[int(node.name) - 1]
+189            except IndexError:
+190                raise OptimizeError(f"Unknown output column: {node.name}")
+191            if isinstance(select, exp.Alias):
+192                select = select.this
+193            new_nodes.append(select.copy())
+194            scope.clear_cache()
+195        else:
+196            new_nodes.append(node)
+197
+198    return new_nodes
+199
+200
+201def _qualify_columns(scope, resolver):
+202    """Disambiguate columns, ensuring each column specifies a source"""
+203    for column in scope.columns:
+204        column_table = column.table
+205        column_name = column.name
+206
+207        if column_table and column_table in scope.sources:
+208            source_columns = resolver.get_source_columns(column_table)
+209            if source_columns and column_name not in source_columns:
+210                raise OptimizeError(f"Unknown column: {column_name}")
+211
+212        if not column_table:
+213            column_table = resolver.get_table(column_name)
+214
+215            # column_table can be a '' because bigquery unnest has no table alias
+216            if column_table:
+217                column.set("table", exp.to_identifier(column_table))
+218
+219    columns_missing_from_scope = []
+220    # Determine whether each reference in the order by clause is to a column or an alias.
+221    for ordered in scope.find_all(exp.Ordered):
+222        for column in ordered.find_all(exp.Column):
+223            if (
+224                not column.table
+225                and column.parent is not ordered
+226                and column.name in resolver.all_columns
+227            ):
+228                columns_missing_from_scope.append(column)
+229
+230    # Determine whether each reference in the having clause is to a column or an alias.
+231    for having in scope.find_all(exp.Having):
+232        for column in having.find_all(exp.Column):
+233            if (
+234                not column.table
+235                and column.find_ancestor(exp.AggFunc)
+236                and column.name in resolver.all_columns
+237            ):
+238                columns_missing_from_scope.append(column)
+239
+240    for column in columns_missing_from_scope:
+241        column_table = resolver.get_table(column.name)
+242
+243        if column_table:
+244            column.set("table", exp.to_identifier(column_table))
+245
+246
+247def _expand_stars(scope, resolver):
+248    """Expand stars to lists of column selections"""
+249
+250    new_selections = []
+251    except_columns = {}
+252    replace_columns = {}
+253
+254    for expression in scope.selects:
+255        if isinstance(expression, exp.Star):
+256            tables = list(scope.selected_sources)
+257            _add_except_columns(expression, tables, except_columns)
+258            _add_replace_columns(expression, tables, replace_columns)
+259        elif isinstance(expression, exp.Column) and isinstance(expression.this, exp.Star):
+260            tables = [expression.table]
+261            _add_except_columns(expression.this, tables, except_columns)
+262            _add_replace_columns(expression.this, tables, replace_columns)
+263        else:
+264            new_selections.append(expression)
+265            continue
+266
+267        for table in tables:
+268            if table not in scope.sources:
+269                raise OptimizeError(f"Unknown table: {table}")
+270            columns = resolver.get_source_columns(table, only_visible=True)
+271            if not columns:
+272                raise OptimizeError(
+273                    f"Table has no schema/columns. Cannot expand star for table: {table}."
+274                )
+275            table_id = id(table)
+276            for name in columns:
+277                if name not in except_columns.get(table_id, set()):
+278                    alias_ = replace_columns.get(table_id, {}).get(name, name)
+279                    column = exp.column(name, table)
+280                    new_selections.append(alias(column, alias_) if alias_ != name else column)
+281
+282    scope.expression.set("expressions", new_selections)
+283
+284
+285def _add_except_columns(expression, tables, except_columns):
+286    except_ = expression.args.get("except")
+287
+288    if not except_:
+289        return
+290
+291    columns = {e.name for e in except_}
+292
+293    for table in tables:
+294        except_columns[id(table)] = columns
+295
+296
+297def _add_replace_columns(expression, tables, replace_columns):
+298    replace = expression.args.get("replace")
+299
+300    if not replace:
+301        return
+302
+303    columns = {e.this.name: e.alias for e in replace}
+304
+305    for table in tables:
+306        replace_columns[id(table)] = columns
+307
+308
+309def _qualify_outputs(scope):
+310    """Ensure all output columns are aliased"""
+311    new_selections = []
+312
+313    for i, (selection, aliased_column) in enumerate(
+314        itertools.zip_longest(scope.selects, scope.outer_column_list)
+315    ):
+316        if isinstance(selection, exp.Subquery):
+317            if not selection.output_name:
+318                selection.set("alias", exp.TableAlias(this=exp.to_identifier(f"_col_{i}")))
+319        elif not isinstance(selection, exp.Alias):
+320            alias_ = alias(exp.column(""), alias=selection.output_name or f"_col_{i}")
+321            alias_.set("this", selection)
+322            selection = alias_
+323
+324        if aliased_column:
+325            selection.set("alias", exp.to_identifier(aliased_column))
+326
+327        new_selections.append(selection)
+328
+329    scope.expression.set("expressions", new_selections)
+330
+331
+332class _Resolver:
+333    """
+334    Helper for resolving columns.
+335
+336    This is a class so we can lazily load some things and easily share them across functions.
+337    """
+338
+339    def __init__(self, scope, schema):
+340        self.scope = scope
+341        self.schema = schema
+342        self._source_columns = None
+343        self._unambiguous_columns = None
+344        self._all_columns = None
+345
+346    def get_table(self, column_name: str) -> t.Optional[str]:
+347        """
+348        Get the table for a column name.
+349
+350        Args:
+351            column_name: The column name to find the table for.
+352        Returns:
+353            The table name if it can be found/inferred.
+354        """
+355        if self._unambiguous_columns is None:
+356            self._unambiguous_columns = self._get_unambiguous_columns(
+357                self._get_all_source_columns()
+358            )
+359
+360        table = self._unambiguous_columns.get(column_name)
+361
+362        if not table:
+363            sources_without_schema = tuple(
+364                source for source, columns in self._get_all_source_columns().items() if not columns
+365            )
+366            if len(sources_without_schema) == 1:
+367                return sources_without_schema[0]
+368
+369        return table
+370
+371    @property
+372    def all_columns(self):
+373        """All available columns of all sources in this scope"""
+374        if self._all_columns is None:
+375            self._all_columns = {
+376                column for columns in self._get_all_source_columns().values() for column in columns
+377            }
+378        return self._all_columns
+379
+380    def get_source_columns(self, name, only_visible=False):
+381        """Resolve the source columns for a given source `name`"""
+382        if name not in self.scope.sources:
+383            raise OptimizeError(f"Unknown table: {name}")
+384
+385        source = self.scope.sources[name]
+386
+387        # If referencing a table, return the columns from the schema
+388        if isinstance(source, exp.Table):
+389            return self.schema.column_names(source, only_visible)
+390
+391        if isinstance(source, Scope) and isinstance(source.expression, exp.Values):
+392            return source.expression.alias_column_names
+393
+394        # Otherwise, if referencing another scope, return that scope's named selects
+395        return source.expression.named_selects
+396
+397    def _get_all_source_columns(self):
+398        if self._source_columns is None:
+399            self._source_columns = {
+400                k: self.get_source_columns(k) for k in self.scope.selected_sources
+401            }
+402        return self._source_columns
+403
+404    def _get_unambiguous_columns(self, source_columns):
+405        """
+406        Find all the unambiguous columns in sources.
+407
+408        Args:
+409            source_columns (dict): Mapping of names to source columns
+410        Returns:
+411            dict: Mapping of column name to source name
+412        """
+413        if not source_columns:
+414            return {}
+415
+416        source_columns = list(source_columns.items())
+417
+418        first_table, first_columns = source_columns[0]
+419        unambiguous_columns = {col: first_table for col in self._find_unique_columns(first_columns)}
+420        all_columns = set(unambiguous_columns)
+421
+422        for table, columns in source_columns[1:]:
+423            unique = self._find_unique_columns(columns)
+424            ambiguous = set(all_columns).intersection(unique)
+425            all_columns.update(columns)
+426            for column in ambiguous:
+427                unambiguous_columns.pop(column, None)
+428            for column in unique.difference(ambiguous):
+429                unambiguous_columns[column] = table
+430
+431        return unambiguous_columns
+432
+433    @staticmethod
+434    def _find_unique_columns(columns):
+435        """
+436        Find the unique columns in a list of columns.
+437
+438        Example:
+439            >>> sorted(_Resolver._find_unique_columns(["a", "b", "b", "c"]))
+440            ['a', 'c']
+441
+442        This is necessary because duplicate column names are ambiguous.
+443        """
+444        counts = {}
+445        for column in columns:
+446            counts[column] = counts.get(column, 0) + 1
+447        return {column for column, count in counts.items() if count == 1}
+
+ + +
+
+ +
+ + def + qualify_columns(expression, schema): + + + +
+ +
11def qualify_columns(expression, schema):
+12    """
+13    Rewrite sqlglot AST to have fully qualified columns.
+14
+15    Example:
+16        >>> import sqlglot
+17        >>> schema = {"tbl": {"col": "INT"}}
+18        >>> expression = sqlglot.parse_one("SELECT col FROM tbl")
+19        >>> qualify_columns(expression, schema).sql()
+20        'SELECT tbl.col AS col FROM tbl'
+21
+22    Args:
+23        expression (sqlglot.Expression): expression to qualify
+24        schema (dict|sqlglot.optimizer.Schema): Database schema
+25    Returns:
+26        sqlglot.Expression: qualified expression
+27    """
+28    schema = ensure_schema(schema)
+29
+30    for scope in traverse_scope(expression):
+31        resolver = _Resolver(scope, schema)
+32        _pop_table_column_aliases(scope.ctes)
+33        _pop_table_column_aliases(scope.derived_tables)
+34        _expand_using(scope, resolver)
+35        _expand_group_by(scope, resolver)
+36        _qualify_columns(scope, resolver)
+37        _expand_order_by(scope)
+38        if not isinstance(scope.expression, exp.UDTF):
+39            _expand_stars(scope, resolver)
+40            _qualify_outputs(scope)
+41
+42    return expression
+
+ + +

Rewrite sqlglot AST to have fully qualified columns.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> schema = {"tbl": {"col": "INT"}}
+>>> expression = sqlglot.parse_one("SELECT col FROM tbl")
+>>> qualify_columns(expression, schema).sql()
+'SELECT tbl.col AS col FROM tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to qualify
  • +
  • schema (dict|sqlglot.optimizer.Schema): Database schema
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: qualified expression

+
+
+ + +
+
+ +
+ + def + validate_qualify_columns(expression): + + + +
+ +
45def validate_qualify_columns(expression):
+46    """Raise an `OptimizeError` if any columns aren't qualified"""
+47    unqualified_columns = []
+48    for scope in traverse_scope(expression):
+49        if isinstance(scope.expression, exp.Select):
+50            unqualified_columns.extend(scope.unqualified_columns)
+51            if scope.external_columns and not scope.is_correlated_subquery:
+52                raise OptimizeError(f"Unknown table: {scope.external_columns[0].table}")
+53
+54    if unqualified_columns:
+55        raise OptimizeError(f"Ambiguous columns: {unqualified_columns}")
+56    return expression
+
+ + +

Raise an OptimizeError if any columns aren't qualified

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/qualify_tables.html b/docs/sqlglot/optimizer/qualify_tables.html new file mode 100644 index 0000000..acde3e6 --- /dev/null +++ b/docs/sqlglot/optimizer/qualify_tables.html @@ -0,0 +1,427 @@ + + + + + + + sqlglot.optimizer.qualify_tables API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.qualify_tables

+ + + + + + +
 1import itertools
+ 2
+ 3from sqlglot import alias, exp
+ 4from sqlglot.helper import csv_reader
+ 5from sqlglot.optimizer.scope import Scope, traverse_scope
+ 6
+ 7
+ 8def qualify_tables(expression, db=None, catalog=None, schema=None):
+ 9    """
+10    Rewrite sqlglot AST to have fully qualified tables.
+11
+12    Example:
+13        >>> import sqlglot
+14        >>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")
+15        >>> qualify_tables(expression, db="db").sql()
+16        'SELECT 1 FROM db.tbl AS tbl'
+17
+18    Args:
+19        expression (sqlglot.Expression): expression to qualify
+20        db (str): Database name
+21        catalog (str): Catalog name
+22        schema: A schema to populate
+23    Returns:
+24        sqlglot.Expression: qualified expression
+25    """
+26    sequence = itertools.count()
+27
+28    next_name = lambda: f"_q_{next(sequence)}"
+29
+30    for scope in traverse_scope(expression):
+31        for derived_table in scope.ctes + scope.derived_tables:
+32            if not derived_table.args.get("alias"):
+33                alias_ = f"_q_{next(sequence)}"
+34                derived_table.set("alias", exp.TableAlias(this=exp.to_identifier(alias_)))
+35                scope.rename_source(None, alias_)
+36
+37        for source in scope.sources.values():
+38            if isinstance(source, exp.Table):
+39                identifier = isinstance(source.this, exp.Identifier)
+40
+41                if identifier:
+42                    if not source.args.get("db"):
+43                        source.set("db", exp.to_identifier(db))
+44                    if not source.args.get("catalog"):
+45                        source.set("catalog", exp.to_identifier(catalog))
+46
+47                if not source.alias:
+48                    source = source.replace(
+49                        alias(
+50                            source.copy(),
+51                            source.this if identifier else next_name(),
+52                            table=True,
+53                        )
+54                    )
+55
+56                if schema and isinstance(source.this, exp.ReadCSV):
+57                    with csv_reader(source.this) as reader:
+58                        header = next(reader)
+59                        columns = next(reader)
+60                        schema.add_table(
+61                            source, {k: type(v).__name__ for k, v in zip(header, columns)}
+62                        )
+63            elif isinstance(source, Scope) and source.is_udtf:
+64                udtf = source.expression
+65                table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_name())
+66                udtf.set("alias", table_alias)
+67
+68                if not table_alias.name:
+69                    table_alias.set("this", next_name())
+70
+71    return expression
+
+ + +
+
+ +
+ + def + qualify_tables(expression, db=None, catalog=None, schema=None): + + + +
+ +
 9def qualify_tables(expression, db=None, catalog=None, schema=None):
+10    """
+11    Rewrite sqlglot AST to have fully qualified tables.
+12
+13    Example:
+14        >>> import sqlglot
+15        >>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")
+16        >>> qualify_tables(expression, db="db").sql()
+17        'SELECT 1 FROM db.tbl AS tbl'
+18
+19    Args:
+20        expression (sqlglot.Expression): expression to qualify
+21        db (str): Database name
+22        catalog (str): Catalog name
+23        schema: A schema to populate
+24    Returns:
+25        sqlglot.Expression: qualified expression
+26    """
+27    sequence = itertools.count()
+28
+29    next_name = lambda: f"_q_{next(sequence)}"
+30
+31    for scope in traverse_scope(expression):
+32        for derived_table in scope.ctes + scope.derived_tables:
+33            if not derived_table.args.get("alias"):
+34                alias_ = f"_q_{next(sequence)}"
+35                derived_table.set("alias", exp.TableAlias(this=exp.to_identifier(alias_)))
+36                scope.rename_source(None, alias_)
+37
+38        for source in scope.sources.values():
+39            if isinstance(source, exp.Table):
+40                identifier = isinstance(source.this, exp.Identifier)
+41
+42                if identifier:
+43                    if not source.args.get("db"):
+44                        source.set("db", exp.to_identifier(db))
+45                    if not source.args.get("catalog"):
+46                        source.set("catalog", exp.to_identifier(catalog))
+47
+48                if not source.alias:
+49                    source = source.replace(
+50                        alias(
+51                            source.copy(),
+52                            source.this if identifier else next_name(),
+53                            table=True,
+54                        )
+55                    )
+56
+57                if schema and isinstance(source.this, exp.ReadCSV):
+58                    with csv_reader(source.this) as reader:
+59                        header = next(reader)
+60                        columns = next(reader)
+61                        schema.add_table(
+62                            source, {k: type(v).__name__ for k, v in zip(header, columns)}
+63                        )
+64            elif isinstance(source, Scope) and source.is_udtf:
+65                udtf = source.expression
+66                table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_name())
+67                udtf.set("alias", table_alias)
+68
+69                if not table_alias.name:
+70                    table_alias.set("this", next_name())
+71
+72    return expression
+
+ + +

Rewrite sqlglot AST to have fully qualified tables.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("SELECT 1 FROM tbl")
+>>> qualify_tables(expression, db="db").sql()
+'SELECT 1 FROM db.tbl AS tbl'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to qualify
  • +
  • db (str): Database name
  • +
  • catalog (str): Catalog name
  • +
  • schema: A schema to populate
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: qualified expression

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/scope.html b/docs/sqlglot/optimizer/scope.html new file mode 100644 index 0000000..94b5f5b --- /dev/null +++ b/docs/sqlglot/optimizer/scope.html @@ -0,0 +1,2512 @@ + + + + + + + sqlglot.optimizer.scope API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.scope

+ + + + + + +
  1import itertools
+  2from collections import defaultdict
+  3from enum import Enum, auto
+  4
+  5from sqlglot import exp
+  6from sqlglot.errors import OptimizeError
+  7
+  8
+  9class ScopeType(Enum):
+ 10    ROOT = auto()
+ 11    SUBQUERY = auto()
+ 12    DERIVED_TABLE = auto()
+ 13    CTE = auto()
+ 14    UNION = auto()
+ 15    UDTF = auto()
+ 16
+ 17
+ 18class Scope:
+ 19    """
+ 20    Selection scope.
+ 21
+ 22    Attributes:
+ 23        expression (exp.Select|exp.Union): Root expression of this scope
+ 24        sources (dict[str, exp.Table|Scope]): Mapping of source name to either
+ 25            a Table expression or another Scope instance. For example:
+ 26                SELECT * FROM x                     {"x": Table(this="x")}
+ 27                SELECT * FROM x AS y                {"y": Table(this="x")}
+ 28                SELECT * FROM (SELECT ...) AS y     {"y": Scope(...)}
+ 29        outer_column_list (list[str]): If this is a derived table or CTE, and the outer query
+ 30            defines a column list of it's alias of this scope, this is that list of columns.
+ 31            For example:
+ 32                SELECT * FROM (SELECT ...) AS y(col1, col2)
+ 33            The inner query would have `["col1", "col2"]` for its `outer_column_list`
+ 34        parent (Scope): Parent scope
+ 35        scope_type (ScopeType): Type of this scope, relative to it's parent
+ 36        subquery_scopes (list[Scope]): List of all child scopes for subqueries
+ 37        cte_scopes = (list[Scope]) List of all child scopes for CTEs
+ 38        derived_table_scopes = (list[Scope]) List of all child scopes for derived_tables
+ 39        union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be
+ 40            a list of the left and right child scopes.
+ 41    """
+ 42
+ 43    def __init__(
+ 44        self,
+ 45        expression,
+ 46        sources=None,
+ 47        outer_column_list=None,
+ 48        parent=None,
+ 49        scope_type=ScopeType.ROOT,
+ 50    ):
+ 51        self.expression = expression
+ 52        self.sources = sources or {}
+ 53        self.outer_column_list = outer_column_list or []
+ 54        self.parent = parent
+ 55        self.scope_type = scope_type
+ 56        self.subquery_scopes = []
+ 57        self.derived_table_scopes = []
+ 58        self.cte_scopes = []
+ 59        self.union_scopes = []
+ 60        self.clear_cache()
+ 61
+ 62    def clear_cache(self):
+ 63        self._collected = False
+ 64        self._raw_columns = None
+ 65        self._derived_tables = None
+ 66        self._tables = None
+ 67        self._ctes = None
+ 68        self._subqueries = None
+ 69        self._selected_sources = None
+ 70        self._columns = None
+ 71        self._external_columns = None
+ 72        self._join_hints = None
+ 73
+ 74    def branch(self, expression, scope_type, chain_sources=None, **kwargs):
+ 75        """Branch from the current scope to a new, inner scope"""
+ 76        return Scope(
+ 77            expression=expression.unnest(),
+ 78            sources={**self.cte_sources, **(chain_sources or {})},
+ 79            parent=self,
+ 80            scope_type=scope_type,
+ 81            **kwargs,
+ 82        )
+ 83
+ 84    def _collect(self):
+ 85        self._tables = []
+ 86        self._ctes = []
+ 87        self._subqueries = []
+ 88        self._derived_tables = []
+ 89        self._raw_columns = []
+ 90        self._join_hints = []
+ 91
+ 92        for node, parent, _ in self.walk(bfs=False):
+ 93            if node is self.expression:
+ 94                continue
+ 95            elif isinstance(node, exp.Column) and not isinstance(node.this, exp.Star):
+ 96                self._raw_columns.append(node)
+ 97            elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
+ 98                self._tables.append(node)
+ 99            elif isinstance(node, exp.JoinHint):
+100                self._join_hints.append(node)
+101            elif isinstance(node, exp.UDTF):
+102                self._derived_tables.append(node)
+103            elif isinstance(node, exp.CTE):
+104                self._ctes.append(node)
+105            elif isinstance(node, exp.Subquery) and isinstance(parent, (exp.From, exp.Join)):
+106                self._derived_tables.append(node)
+107            elif isinstance(node, exp.Subqueryable):
+108                self._subqueries.append(node)
+109
+110        self._collected = True
+111
+112    def _ensure_collected(self):
+113        if not self._collected:
+114            self._collect()
+115
+116    def walk(self, bfs=True):
+117        return walk_in_scope(self.expression, bfs=bfs)
+118
+119    def find(self, *expression_types, bfs=True):
+120        """
+121        Returns the first node in this scope which matches at least one of the specified types.
+122
+123        This does NOT traverse into subscopes.
+124
+125        Args:
+126            expression_types (type): the expression type(s) to match.
+127            bfs (bool): True to use breadth-first search, False to use depth-first.
+128
+129        Returns:
+130            exp.Expression: the node which matches the criteria or None if no node matching
+131            the criteria was found.
+132        """
+133        return next(self.find_all(*expression_types, bfs=bfs), None)
+134
+135    def find_all(self, *expression_types, bfs=True):
+136        """
+137        Returns a generator object which visits all nodes in this scope and only yields those that
+138        match at least one of the specified expression types.
+139
+140        This does NOT traverse into subscopes.
+141
+142        Args:
+143            expression_types (type): the expression type(s) to match.
+144            bfs (bool): True to use breadth-first search, False to use depth-first.
+145
+146        Yields:
+147            exp.Expression: nodes
+148        """
+149        for expression, _, _ in self.walk(bfs=bfs):
+150            if isinstance(expression, expression_types):
+151                yield expression
+152
+153    def replace(self, old, new):
+154        """
+155        Replace `old` with `new`.
+156
+157        This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.
+158
+159        Args:
+160            old (exp.Expression): old node
+161            new (exp.Expression): new node
+162        """
+163        old.replace(new)
+164        self.clear_cache()
+165
+166    @property
+167    def tables(self):
+168        """
+169        List of tables in this scope.
+170
+171        Returns:
+172            list[exp.Table]: tables
+173        """
+174        self._ensure_collected()
+175        return self._tables
+176
+177    @property
+178    def ctes(self):
+179        """
+180        List of CTEs in this scope.
+181
+182        Returns:
+183            list[exp.CTE]: ctes
+184        """
+185        self._ensure_collected()
+186        return self._ctes
+187
+188    @property
+189    def derived_tables(self):
+190        """
+191        List of derived tables in this scope.
+192
+193        For example:
+194            SELECT * FROM (SELECT ...) <- that's a derived table
+195
+196        Returns:
+197            list[exp.Subquery]: derived tables
+198        """
+199        self._ensure_collected()
+200        return self._derived_tables
+201
+202    @property
+203    def subqueries(self):
+204        """
+205        List of subqueries in this scope.
+206
+207        For example:
+208            SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery
+209
+210        Returns:
+211            list[exp.Subqueryable]: subqueries
+212        """
+213        self._ensure_collected()
+214        return self._subqueries
+215
+216    @property
+217    def columns(self):
+218        """
+219        List of columns in this scope.
+220
+221        Returns:
+222            list[exp.Column]: Column instances in this scope, plus any
+223                Columns that reference this scope from correlated subqueries.
+224        """
+225        if self._columns is None:
+226            self._ensure_collected()
+227            columns = self._raw_columns
+228
+229            external_columns = [
+230                column for scope in self.subquery_scopes for column in scope.external_columns
+231            ]
+232
+233            named_selects = set(self.expression.named_selects)
+234
+235            self._columns = []
+236            for column in columns + external_columns:
+237                ancestor = column.find_ancestor(exp.Qualify, exp.Order, exp.Having, exp.Hint)
+238                if (
+239                    not ancestor
+240                    # Window functions can have an ORDER BY clause
+241                    or not isinstance(ancestor.parent, exp.Select)
+242                    or column.table
+243                    or (column.name not in named_selects and not isinstance(ancestor, exp.Hint))
+244                ):
+245                    self._columns.append(column)
+246
+247        return self._columns
+248
+249    @property
+250    def selected_sources(self):
+251        """
+252        Mapping of nodes and sources that are actually selected from in this scope.
+253
+254        That is, all tables in a schema are selectable at any point. But a
+255        table only becomes a selected source if it's included in a FROM or JOIN clause.
+256
+257        Returns:
+258            dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes
+259        """
+260        if self._selected_sources is None:
+261            referenced_names = []
+262
+263            for table in self.tables:
+264                referenced_names.append((table.alias_or_name, table))
+265            for derived_table in self.derived_tables:
+266                referenced_names.append((derived_table.alias, derived_table.unnest()))
+267
+268            result = {}
+269
+270            for name, node in referenced_names:
+271                if name in self.sources:
+272                    result[name] = (node, self.sources[name])
+273
+274            self._selected_sources = result
+275        return self._selected_sources
+276
+277    @property
+278    def cte_sources(self):
+279        """
+280        Sources that are CTEs.
+281
+282        Returns:
+283            dict[str, Scope]: Mapping of source alias to Scope
+284        """
+285        return {
+286            alias: scope
+287            for alias, scope in self.sources.items()
+288            if isinstance(scope, Scope) and scope.is_cte
+289        }
+290
+291    @property
+292    def selects(self):
+293        """
+294        Select expressions of this scope.
+295
+296        For example, for the following expression:
+297            SELECT 1 as a, 2 as b FROM x
+298
+299        The outputs are the "1 as a" and "2 as b" expressions.
+300
+301        Returns:
+302            list[exp.Expression]: expressions
+303        """
+304        if isinstance(self.expression, exp.Union):
+305            return self.expression.unnest().selects
+306        return self.expression.selects
+307
+308    @property
+309    def external_columns(self):
+310        """
+311        Columns that appear to reference sources in outer scopes.
+312
+313        Returns:
+314            list[exp.Column]: Column instances that don't reference
+315                sources in the current scope.
+316        """
+317        if self._external_columns is None:
+318            self._external_columns = [
+319                c for c in self.columns if c.table not in self.selected_sources
+320            ]
+321        return self._external_columns
+322
+323    @property
+324    def unqualified_columns(self):
+325        """
+326        Unqualified columns in the current scope.
+327
+328        Returns:
+329             list[exp.Column]: Unqualified columns
+330        """
+331        return [c for c in self.columns if not c.table]
+332
+333    @property
+334    def join_hints(self):
+335        """
+336        Hints that exist in the scope that reference tables
+337
+338        Returns:
+339            list[exp.JoinHint]: Join hints that are referenced within the scope
+340        """
+341        if self._join_hints is None:
+342            return []
+343        return self._join_hints
+344
+345    def source_columns(self, source_name):
+346        """
+347        Get all columns in the current scope for a particular source.
+348
+349        Args:
+350            source_name (str): Name of the source
+351        Returns:
+352            list[exp.Column]: Column instances that reference `source_name`
+353        """
+354        return [column for column in self.columns if column.table == source_name]
+355
+356    @property
+357    def is_subquery(self):
+358        """Determine if this scope is a subquery"""
+359        return self.scope_type == ScopeType.SUBQUERY
+360
+361    @property
+362    def is_derived_table(self):
+363        """Determine if this scope is a derived table"""
+364        return self.scope_type == ScopeType.DERIVED_TABLE
+365
+366    @property
+367    def is_union(self):
+368        """Determine if this scope is a union"""
+369        return self.scope_type == ScopeType.UNION
+370
+371    @property
+372    def is_cte(self):
+373        """Determine if this scope is a common table expression"""
+374        return self.scope_type == ScopeType.CTE
+375
+376    @property
+377    def is_root(self):
+378        """Determine if this is the root scope"""
+379        return self.scope_type == ScopeType.ROOT
+380
+381    @property
+382    def is_udtf(self):
+383        """Determine if this scope is a UDTF (User Defined Table Function)"""
+384        return self.scope_type == ScopeType.UDTF
+385
+386    @property
+387    def is_correlated_subquery(self):
+388        """Determine if this scope is a correlated subquery"""
+389        return bool(self.is_subquery and self.external_columns)
+390
+391    def rename_source(self, old_name, new_name):
+392        """Rename a source in this scope"""
+393        columns = self.sources.pop(old_name or "", [])
+394        self.sources[new_name] = columns
+395
+396    def add_source(self, name, source):
+397        """Add a source to this scope"""
+398        self.sources[name] = source
+399        self.clear_cache()
+400
+401    def remove_source(self, name):
+402        """Remove a source from this scope"""
+403        self.sources.pop(name, None)
+404        self.clear_cache()
+405
+406    def __repr__(self):
+407        return f"Scope<{self.expression.sql()}>"
+408
+409    def traverse(self):
+410        """
+411        Traverse the scope tree from this node.
+412
+413        Yields:
+414            Scope: scope instances in depth-first-search post-order
+415        """
+416        for child_scope in itertools.chain(
+417            self.cte_scopes, self.union_scopes, self.derived_table_scopes, self.subquery_scopes
+418        ):
+419            yield from child_scope.traverse()
+420        yield self
+421
+422    def ref_count(self):
+423        """
+424        Count the number of times each scope in this tree is referenced.
+425
+426        Returns:
+427            dict[int, int]: Mapping of Scope instance ID to reference count
+428        """
+429        scope_ref_count = defaultdict(lambda: 0)
+430
+431        for scope in self.traverse():
+432            for _, source in scope.selected_sources.values():
+433                scope_ref_count[id(source)] += 1
+434
+435        return scope_ref_count
+436
+437
+438def traverse_scope(expression):
+439    """
+440    Traverse an expression by it's "scopes".
+441
+442    "Scope" represents the current context of a Select statement.
+443
+444    This is helpful for optimizing queries, where we need more information than
+445    the expression tree itself. For example, we might care about the source
+446    names within a subquery. Returns a list because a generator could result in
+447    incomplete properties which is confusing.
+448
+449    Examples:
+450        >>> import sqlglot
+451        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
+452        >>> scopes = traverse_scope(expression)
+453        >>> scopes[0].expression.sql(), list(scopes[0].sources)
+454        ('SELECT a FROM x', ['x'])
+455        >>> scopes[1].expression.sql(), list(scopes[1].sources)
+456        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])
+457
+458    Args:
+459        expression (exp.Expression): expression to traverse
+460    Returns:
+461        list[Scope]: scope instances
+462    """
+463    return list(_traverse_scope(Scope(expression)))
+464
+465
+466def build_scope(expression):
+467    """
+468    Build a scope tree.
+469
+470    Args:
+471        expression (exp.Expression): expression to build the scope tree for
+472    Returns:
+473        Scope: root scope
+474    """
+475    return traverse_scope(expression)[-1]
+476
+477
+478def _traverse_scope(scope):
+479    if isinstance(scope.expression, exp.Select):
+480        yield from _traverse_select(scope)
+481    elif isinstance(scope.expression, exp.Union):
+482        yield from _traverse_union(scope)
+483    elif isinstance(scope.expression, exp.UDTF):
+484        _set_udtf_scope(scope)
+485    elif isinstance(scope.expression, exp.Subquery):
+486        yield from _traverse_subqueries(scope)
+487    else:
+488        raise OptimizeError(f"Unexpected expression type: {type(scope.expression)}")
+489    yield scope
+490
+491
+492def _traverse_select(scope):
+493    yield from _traverse_derived_tables(scope.ctes, scope, ScopeType.CTE)
+494    yield from _traverse_derived_tables(scope.derived_tables, scope, ScopeType.DERIVED_TABLE)
+495    yield from _traverse_subqueries(scope)
+496    _add_table_sources(scope)
+497
+498
+499def _traverse_union(scope):
+500    yield from _traverse_derived_tables(scope.ctes, scope, scope_type=ScopeType.CTE)
+501
+502    # The last scope to be yield should be the top most scope
+503    left = None
+504    for left in _traverse_scope(scope.branch(scope.expression.left, scope_type=ScopeType.UNION)):
+505        yield left
+506
+507    right = None
+508    for right in _traverse_scope(scope.branch(scope.expression.right, scope_type=ScopeType.UNION)):
+509        yield right
+510
+511    scope.union_scopes = [left, right]
+512
+513
+514def _set_udtf_scope(scope):
+515    parent = scope.expression.parent
+516    from_ = parent.args.get("from")
+517
+518    if not from_:
+519        return
+520
+521    for table in from_.expressions:
+522        if isinstance(table, exp.Table):
+523            scope.tables.append(table)
+524        elif isinstance(table, exp.Subquery):
+525            scope.subqueries.append(table)
+526    _add_table_sources(scope)
+527    _traverse_subqueries(scope)
+528
+529
+530def _traverse_derived_tables(derived_tables, scope, scope_type):
+531    sources = {}
+532    is_cte = scope_type == ScopeType.CTE
+533
+534    for derived_table in derived_tables:
+535        recursive_scope = None
+536
+537        # if the scope is a recursive cte, it must be in the form of
+538        # base_case UNION recursive. thus the recursive scope is the first
+539        # section of the union.
+540        if is_cte and scope.expression.args["with"].recursive:
+541            union = derived_table.this
+542
+543            if isinstance(union, exp.Union):
+544                recursive_scope = scope.branch(union.this, scope_type=ScopeType.CTE)
+545
+546        for child_scope in _traverse_scope(
+547            scope.branch(
+548                derived_table if isinstance(derived_table, exp.UDTF) else derived_table.this,
+549                chain_sources=sources if scope_type == ScopeType.CTE else None,
+550                outer_column_list=derived_table.alias_column_names,
+551                scope_type=ScopeType.UDTF if isinstance(derived_table, exp.UDTF) else scope_type,
+552            )
+553        ):
+554            yield child_scope
+555
+556            # Tables without aliases will be set as ""
+557            # This shouldn't be a problem once qualify_columns runs, as it adds aliases on everything.
+558            # Until then, this means that only a single, unaliased derived table is allowed (rather,
+559            # the latest one wins.
+560            alias = derived_table.alias
+561            sources[alias] = child_scope
+562
+563            if recursive_scope:
+564                child_scope.add_source(alias, recursive_scope)
+565
+566        # append the final child_scope yielded
+567        if is_cte:
+568            scope.cte_scopes.append(child_scope)
+569        else:
+570            scope.derived_table_scopes.append(child_scope)
+571
+572    scope.sources.update(sources)
+573
+574
+575def _add_table_sources(scope):
+576    sources = {}
+577    for table in scope.tables:
+578        table_name = table.name
+579
+580        if table.alias:
+581            source_name = table.alias
+582        else:
+583            source_name = table_name
+584
+585        if table_name in scope.sources:
+586            # This is a reference to a parent source (e.g. a CTE), not an actual table.
+587            scope.sources[source_name] = scope.sources[table_name]
+588        else:
+589            sources[source_name] = table
+590
+591    scope.sources.update(sources)
+592
+593
+594def _traverse_subqueries(scope):
+595    for subquery in scope.subqueries:
+596        top = None
+597        for child_scope in _traverse_scope(scope.branch(subquery, scope_type=ScopeType.SUBQUERY)):
+598            yield child_scope
+599            top = child_scope
+600        scope.subquery_scopes.append(top)
+601
+602
+603def walk_in_scope(expression, bfs=True):
+604    """
+605    Returns a generator object which visits all nodes in the syntrax tree, stopping at
+606    nodes that start child scopes.
+607
+608    Args:
+609        expression (exp.Expression):
+610        bfs (bool): if set to True the BFS traversal order will be applied,
+611            otherwise the DFS traversal will be used instead.
+612
+613    Yields:
+614        tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key
+615    """
+616    # We'll use this variable to pass state into the dfs generator.
+617    # Whenever we set it to True, we exclude a subtree from traversal.
+618    prune = False
+619
+620    for node, parent, key in expression.walk(bfs=bfs, prune=lambda *_: prune):
+621        prune = False
+622
+623        yield node, parent, key
+624
+625        if node is expression:
+626            continue
+627        elif isinstance(node, exp.CTE):
+628            prune = True
+629        elif isinstance(node, exp.Subquery) and isinstance(parent, (exp.From, exp.Join)):
+630            prune = True
+631        elif isinstance(node, exp.Subqueryable):
+632            prune = True
+
+ + +
+
+ +
+ + class + ScopeType(enum.Enum): + + + +
+ +
10class ScopeType(Enum):
+11    ROOT = auto()
+12    SUBQUERY = auto()
+13    DERIVED_TABLE = auto()
+14    CTE = auto()
+15    UNION = auto()
+16    UDTF = auto()
+
+ + +

An enumeration.

+
+ + +
+
+ ROOT = <ScopeType.ROOT: 1> + + +
+ + + + +
+
+
+ SUBQUERY = <ScopeType.SUBQUERY: 2> + + +
+ + + + +
+
+
+ DERIVED_TABLE = <ScopeType.DERIVED_TABLE: 3> + + +
+ + + + +
+
+
+ CTE = <ScopeType.CTE: 4> + + +
+ + + + +
+
+
+ UNION = <ScopeType.UNION: 5> + + +
+ + + + +
+
+
+ UDTF = <ScopeType.UDTF: 6> + + +
+ + + + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + class + Scope: + + + +
+ +
 19class Scope:
+ 20    """
+ 21    Selection scope.
+ 22
+ 23    Attributes:
+ 24        expression (exp.Select|exp.Union): Root expression of this scope
+ 25        sources (dict[str, exp.Table|Scope]): Mapping of source name to either
+ 26            a Table expression or another Scope instance. For example:
+ 27                SELECT * FROM x                     {"x": Table(this="x")}
+ 28                SELECT * FROM x AS y                {"y": Table(this="x")}
+ 29                SELECT * FROM (SELECT ...) AS y     {"y": Scope(...)}
+ 30        outer_column_list (list[str]): If this is a derived table or CTE, and the outer query
+ 31            defines a column list of it's alias of this scope, this is that list of columns.
+ 32            For example:
+ 33                SELECT * FROM (SELECT ...) AS y(col1, col2)
+ 34            The inner query would have `["col1", "col2"]` for its `outer_column_list`
+ 35        parent (Scope): Parent scope
+ 36        scope_type (ScopeType): Type of this scope, relative to it's parent
+ 37        subquery_scopes (list[Scope]): List of all child scopes for subqueries
+ 38        cte_scopes = (list[Scope]) List of all child scopes for CTEs
+ 39        derived_table_scopes = (list[Scope]) List of all child scopes for derived_tables
+ 40        union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be
+ 41            a list of the left and right child scopes.
+ 42    """
+ 43
+ 44    def __init__(
+ 45        self,
+ 46        expression,
+ 47        sources=None,
+ 48        outer_column_list=None,
+ 49        parent=None,
+ 50        scope_type=ScopeType.ROOT,
+ 51    ):
+ 52        self.expression = expression
+ 53        self.sources = sources or {}
+ 54        self.outer_column_list = outer_column_list or []
+ 55        self.parent = parent
+ 56        self.scope_type = scope_type
+ 57        self.subquery_scopes = []
+ 58        self.derived_table_scopes = []
+ 59        self.cte_scopes = []
+ 60        self.union_scopes = []
+ 61        self.clear_cache()
+ 62
+ 63    def clear_cache(self):
+ 64        self._collected = False
+ 65        self._raw_columns = None
+ 66        self._derived_tables = None
+ 67        self._tables = None
+ 68        self._ctes = None
+ 69        self._subqueries = None
+ 70        self._selected_sources = None
+ 71        self._columns = None
+ 72        self._external_columns = None
+ 73        self._join_hints = None
+ 74
+ 75    def branch(self, expression, scope_type, chain_sources=None, **kwargs):
+ 76        """Branch from the current scope to a new, inner scope"""
+ 77        return Scope(
+ 78            expression=expression.unnest(),
+ 79            sources={**self.cte_sources, **(chain_sources or {})},
+ 80            parent=self,
+ 81            scope_type=scope_type,
+ 82            **kwargs,
+ 83        )
+ 84
+ 85    def _collect(self):
+ 86        self._tables = []
+ 87        self._ctes = []
+ 88        self._subqueries = []
+ 89        self._derived_tables = []
+ 90        self._raw_columns = []
+ 91        self._join_hints = []
+ 92
+ 93        for node, parent, _ in self.walk(bfs=False):
+ 94            if node is self.expression:
+ 95                continue
+ 96            elif isinstance(node, exp.Column) and not isinstance(node.this, exp.Star):
+ 97                self._raw_columns.append(node)
+ 98            elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
+ 99                self._tables.append(node)
+100            elif isinstance(node, exp.JoinHint):
+101                self._join_hints.append(node)
+102            elif isinstance(node, exp.UDTF):
+103                self._derived_tables.append(node)
+104            elif isinstance(node, exp.CTE):
+105                self._ctes.append(node)
+106            elif isinstance(node, exp.Subquery) and isinstance(parent, (exp.From, exp.Join)):
+107                self._derived_tables.append(node)
+108            elif isinstance(node, exp.Subqueryable):
+109                self._subqueries.append(node)
+110
+111        self._collected = True
+112
+113    def _ensure_collected(self):
+114        if not self._collected:
+115            self._collect()
+116
+117    def walk(self, bfs=True):
+118        return walk_in_scope(self.expression, bfs=bfs)
+119
+120    def find(self, *expression_types, bfs=True):
+121        """
+122        Returns the first node in this scope which matches at least one of the specified types.
+123
+124        This does NOT traverse into subscopes.
+125
+126        Args:
+127            expression_types (type): the expression type(s) to match.
+128            bfs (bool): True to use breadth-first search, False to use depth-first.
+129
+130        Returns:
+131            exp.Expression: the node which matches the criteria or None if no node matching
+132            the criteria was found.
+133        """
+134        return next(self.find_all(*expression_types, bfs=bfs), None)
+135
+136    def find_all(self, *expression_types, bfs=True):
+137        """
+138        Returns a generator object which visits all nodes in this scope and only yields those that
+139        match at least one of the specified expression types.
+140
+141        This does NOT traverse into subscopes.
+142
+143        Args:
+144            expression_types (type): the expression type(s) to match.
+145            bfs (bool): True to use breadth-first search, False to use depth-first.
+146
+147        Yields:
+148            exp.Expression: nodes
+149        """
+150        for expression, _, _ in self.walk(bfs=bfs):
+151            if isinstance(expression, expression_types):
+152                yield expression
+153
+154    def replace(self, old, new):
+155        """
+156        Replace `old` with `new`.
+157
+158        This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.
+159
+160        Args:
+161            old (exp.Expression): old node
+162            new (exp.Expression): new node
+163        """
+164        old.replace(new)
+165        self.clear_cache()
+166
+167    @property
+168    def tables(self):
+169        """
+170        List of tables in this scope.
+171
+172        Returns:
+173            list[exp.Table]: tables
+174        """
+175        self._ensure_collected()
+176        return self._tables
+177
+178    @property
+179    def ctes(self):
+180        """
+181        List of CTEs in this scope.
+182
+183        Returns:
+184            list[exp.CTE]: ctes
+185        """
+186        self._ensure_collected()
+187        return self._ctes
+188
+189    @property
+190    def derived_tables(self):
+191        """
+192        List of derived tables in this scope.
+193
+194        For example:
+195            SELECT * FROM (SELECT ...) <- that's a derived table
+196
+197        Returns:
+198            list[exp.Subquery]: derived tables
+199        """
+200        self._ensure_collected()
+201        return self._derived_tables
+202
+203    @property
+204    def subqueries(self):
+205        """
+206        List of subqueries in this scope.
+207
+208        For example:
+209            SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery
+210
+211        Returns:
+212            list[exp.Subqueryable]: subqueries
+213        """
+214        self._ensure_collected()
+215        return self._subqueries
+216
+217    @property
+218    def columns(self):
+219        """
+220        List of columns in this scope.
+221
+222        Returns:
+223            list[exp.Column]: Column instances in this scope, plus any
+224                Columns that reference this scope from correlated subqueries.
+225        """
+226        if self._columns is None:
+227            self._ensure_collected()
+228            columns = self._raw_columns
+229
+230            external_columns = [
+231                column for scope in self.subquery_scopes for column in scope.external_columns
+232            ]
+233
+234            named_selects = set(self.expression.named_selects)
+235
+236            self._columns = []
+237            for column in columns + external_columns:
+238                ancestor = column.find_ancestor(exp.Qualify, exp.Order, exp.Having, exp.Hint)
+239                if (
+240                    not ancestor
+241                    # Window functions can have an ORDER BY clause
+242                    or not isinstance(ancestor.parent, exp.Select)
+243                    or column.table
+244                    or (column.name not in named_selects and not isinstance(ancestor, exp.Hint))
+245                ):
+246                    self._columns.append(column)
+247
+248        return self._columns
+249
+250    @property
+251    def selected_sources(self):
+252        """
+253        Mapping of nodes and sources that are actually selected from in this scope.
+254
+255        That is, all tables in a schema are selectable at any point. But a
+256        table only becomes a selected source if it's included in a FROM or JOIN clause.
+257
+258        Returns:
+259            dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes
+260        """
+261        if self._selected_sources is None:
+262            referenced_names = []
+263
+264            for table in self.tables:
+265                referenced_names.append((table.alias_or_name, table))
+266            for derived_table in self.derived_tables:
+267                referenced_names.append((derived_table.alias, derived_table.unnest()))
+268
+269            result = {}
+270
+271            for name, node in referenced_names:
+272                if name in self.sources:
+273                    result[name] = (node, self.sources[name])
+274
+275            self._selected_sources = result
+276        return self._selected_sources
+277
+278    @property
+279    def cte_sources(self):
+280        """
+281        Sources that are CTEs.
+282
+283        Returns:
+284            dict[str, Scope]: Mapping of source alias to Scope
+285        """
+286        return {
+287            alias: scope
+288            for alias, scope in self.sources.items()
+289            if isinstance(scope, Scope) and scope.is_cte
+290        }
+291
+292    @property
+293    def selects(self):
+294        """
+295        Select expressions of this scope.
+296
+297        For example, for the following expression:
+298            SELECT 1 as a, 2 as b FROM x
+299
+300        The outputs are the "1 as a" and "2 as b" expressions.
+301
+302        Returns:
+303            list[exp.Expression]: expressions
+304        """
+305        if isinstance(self.expression, exp.Union):
+306            return self.expression.unnest().selects
+307        return self.expression.selects
+308
+309    @property
+310    def external_columns(self):
+311        """
+312        Columns that appear to reference sources in outer scopes.
+313
+314        Returns:
+315            list[exp.Column]: Column instances that don't reference
+316                sources in the current scope.
+317        """
+318        if self._external_columns is None:
+319            self._external_columns = [
+320                c for c in self.columns if c.table not in self.selected_sources
+321            ]
+322        return self._external_columns
+323
+324    @property
+325    def unqualified_columns(self):
+326        """
+327        Unqualified columns in the current scope.
+328
+329        Returns:
+330             list[exp.Column]: Unqualified columns
+331        """
+332        return [c for c in self.columns if not c.table]
+333
+334    @property
+335    def join_hints(self):
+336        """
+337        Hints that exist in the scope that reference tables
+338
+339        Returns:
+340            list[exp.JoinHint]: Join hints that are referenced within the scope
+341        """
+342        if self._join_hints is None:
+343            return []
+344        return self._join_hints
+345
+346    def source_columns(self, source_name):
+347        """
+348        Get all columns in the current scope for a particular source.
+349
+350        Args:
+351            source_name (str): Name of the source
+352        Returns:
+353            list[exp.Column]: Column instances that reference `source_name`
+354        """
+355        return [column for column in self.columns if column.table == source_name]
+356
+357    @property
+358    def is_subquery(self):
+359        """Determine if this scope is a subquery"""
+360        return self.scope_type == ScopeType.SUBQUERY
+361
+362    @property
+363    def is_derived_table(self):
+364        """Determine if this scope is a derived table"""
+365        return self.scope_type == ScopeType.DERIVED_TABLE
+366
+367    @property
+368    def is_union(self):
+369        """Determine if this scope is a union"""
+370        return self.scope_type == ScopeType.UNION
+371
+372    @property
+373    def is_cte(self):
+374        """Determine if this scope is a common table expression"""
+375        return self.scope_type == ScopeType.CTE
+376
+377    @property
+378    def is_root(self):
+379        """Determine if this is the root scope"""
+380        return self.scope_type == ScopeType.ROOT
+381
+382    @property
+383    def is_udtf(self):
+384        """Determine if this scope is a UDTF (User Defined Table Function)"""
+385        return self.scope_type == ScopeType.UDTF
+386
+387    @property
+388    def is_correlated_subquery(self):
+389        """Determine if this scope is a correlated subquery"""
+390        return bool(self.is_subquery and self.external_columns)
+391
+392    def rename_source(self, old_name, new_name):
+393        """Rename a source in this scope"""
+394        columns = self.sources.pop(old_name or "", [])
+395        self.sources[new_name] = columns
+396
+397    def add_source(self, name, source):
+398        """Add a source to this scope"""
+399        self.sources[name] = source
+400        self.clear_cache()
+401
+402    def remove_source(self, name):
+403        """Remove a source from this scope"""
+404        self.sources.pop(name, None)
+405        self.clear_cache()
+406
+407    def __repr__(self):
+408        return f"Scope<{self.expression.sql()}>"
+409
+410    def traverse(self):
+411        """
+412        Traverse the scope tree from this node.
+413
+414        Yields:
+415            Scope: scope instances in depth-first-search post-order
+416        """
+417        for child_scope in itertools.chain(
+418            self.cte_scopes, self.union_scopes, self.derived_table_scopes, self.subquery_scopes
+419        ):
+420            yield from child_scope.traverse()
+421        yield self
+422
+423    def ref_count(self):
+424        """
+425        Count the number of times each scope in this tree is referenced.
+426
+427        Returns:
+428            dict[int, int]: Mapping of Scope instance ID to reference count
+429        """
+430        scope_ref_count = defaultdict(lambda: 0)
+431
+432        for scope in self.traverse():
+433            for _, source in scope.selected_sources.values():
+434                scope_ref_count[id(source)] += 1
+435
+436        return scope_ref_count
+
+ + +

Selection scope.

+ +
Attributes:
+ +
    +
  • expression (exp.Select|exp.Union): Root expression of this scope
  • +
  • sources (dict[str, exp.Table|Scope]): Mapping of source name to either +a Table expression or another Scope instance. For example: + SELECT * FROM x {"x": Table(this="x")} + SELECT * FROM x AS y {"y": Table(this="x")} + SELECT * FROM (SELECT ...) AS y {"y": Scope(...)}
  • +
  • outer_column_list (list[str]): If this is a derived table or CTE, and the outer query +defines a column list for the alias of this scope, this is that list of columns. +For example: + SELECT * FROM (SELECT ...) AS y(col1, col2) +The inner query would have ["col1", "col2"] for its outer_column_list
  • +
  • parent (Scope): Parent scope
  • +
  • scope_type (ScopeType): Type of this scope, relative to its parent
  • +
  • subquery_scopes (list[Scope]): List of all child scopes for subqueries
  • +
  • cte_scopes (list[Scope]): List of all child scopes for CTEs
  • +
  • derived_table_scopes (list[Scope]): List of all child scopes for derived tables
  • +
  • union_scopes (list[Scope, Scope]): If this Scope is for a Union expression, this will be +a list of the left and right child scopes.
  • +
+
+ + +
+ +
+ + Scope( expression, sources=None, outer_column_list=None, parent=None, scope_type=<ScopeType.ROOT: 1>) + + + +
+ +
44    def __init__(
+45        self,
+46        expression,
+47        sources=None,
+48        outer_column_list=None,
+49        parent=None,
+50        scope_type=ScopeType.ROOT,
+51    ):
+52        self.expression = expression
+53        self.sources = sources or {}
+54        self.outer_column_list = outer_column_list or []
+55        self.parent = parent
+56        self.scope_type = scope_type
+57        self.subquery_scopes = []
+58        self.derived_table_scopes = []
+59        self.cte_scopes = []
+60        self.union_scopes = []
+61        self.clear_cache()
+
+ + + + +
+
+ +
+ + def + clear_cache(self): + + + +
+ +
63    def clear_cache(self):
+64        self._collected = False
+65        self._raw_columns = None
+66        self._derived_tables = None
+67        self._tables = None
+68        self._ctes = None
+69        self._subqueries = None
+70        self._selected_sources = None
+71        self._columns = None
+72        self._external_columns = None
+73        self._join_hints = None
+
+ + + + +
+
+ +
+ + def + branch(self, expression, scope_type, chain_sources=None, **kwargs): + + + +
+ +
75    def branch(self, expression, scope_type, chain_sources=None, **kwargs):
+76        """Branch from the current scope to a new, inner scope"""
+77        return Scope(
+78            expression=expression.unnest(),
+79            sources={**self.cte_sources, **(chain_sources or {})},
+80            parent=self,
+81            scope_type=scope_type,
+82            **kwargs,
+83        )
+
+ + +

Branch from the current scope to a new, inner scope

+
+ + +
+
+ +
+ + def + walk(self, bfs=True): + + + +
+ +
117    def walk(self, bfs=True):
+118        return walk_in_scope(self.expression, bfs=bfs)
+
+ + + + +
+
+ +
+ + def + find(self, *expression_types, bfs=True): + + + +
+ +
120    def find(self, *expression_types, bfs=True):
+121        """
+122        Returns the first node in this scope which matches at least one of the specified types.
+123
+124        This does NOT traverse into subscopes.
+125
+126        Args:
+127            expression_types (type): the expression type(s) to match.
+128            bfs (bool): True to use breadth-first search, False to use depth-first.
+129
+130        Returns:
+131            exp.Expression: the node which matches the criteria or None if no node matching
+132            the criteria was found.
+133        """
+134        return next(self.find_all(*expression_types, bfs=bfs), None)
+
+ + +

Returns the first node in this scope which matches at least one of the specified types.

+ +

This does NOT traverse into subscopes.

+ +
Arguments:
+ +
    +
  • expression_types (type): the expression type(s) to match.
  • +
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • +
+ +
Returns:
+ +
+

exp.Expression: the node which matches the criteria or None if no node matching + the criteria was found.

+
+
+ + +
+
+ +
+ + def + find_all(self, *expression_types, bfs=True): + + + +
+ +
136    def find_all(self, *expression_types, bfs=True):
+137        """
+138        Returns a generator object which visits all nodes in this scope and only yields those that
+139        match at least one of the specified expression types.
+140
+141        This does NOT traverse into subscopes.
+142
+143        Args:
+144            expression_types (type): the expression type(s) to match.
+145            bfs (bool): True to use breadth-first search, False to use depth-first.
+146
+147        Yields:
+148            exp.Expression: nodes
+149        """
+150        for expression, _, _ in self.walk(bfs=bfs):
+151            if isinstance(expression, expression_types):
+152                yield expression
+
+ + +

Returns a generator object which visits all nodes in this scope and only yields those that +match at least one of the specified expression types.

+ +

This does NOT traverse into subscopes.

+ +
Arguments:
+ +
    +
  • expression_types (type): the expression type(s) to match.
  • +
  • bfs (bool): True to use breadth-first search, False to use depth-first.
  • +
+ +
Yields:
+ +
+

exp.Expression: nodes

+
+
+ + +
+
+ +
+ + def + replace(self, old, new): + + + +
+ +
154    def replace(self, old, new):
+155        """
+156        Replace `old` with `new`.
+157
+158        This can be used instead of `exp.Expression.replace` to ensure the `Scope` is kept up-to-date.
+159
+160        Args:
+161            old (exp.Expression): old node
+162            new (exp.Expression): new node
+163        """
+164        old.replace(new)
+165        self.clear_cache()
+
+ + +

Replace old with new.

+ +

This can be used instead of exp.Expression.replace to ensure the Scope is kept up-to-date.

+ +
Arguments:
+ +
    +
  • old (exp.Expression): old node
  • +
  • new (exp.Expression): new node
  • +
+
+ + +
+
+
+ tables + + +
+ + +

List of tables in this scope.

+ +
Returns:
+ +
+

list[exp.Table]: tables

+
+
+ + +
+
+
+ ctes + + +
+ + +

List of CTEs in this scope.

+ +
Returns:
+ +
+

list[exp.CTE]: ctes

+
+
+ + +
+
+
+ derived_tables + + +
+ + +

List of derived tables in this scope.

+ +
For example:
+ +
+

SELECT * FROM (SELECT ...) <- that's a derived table

+
+ +
Returns:
+ +
+

list[exp.Subquery]: derived tables

+
+
+ + +
+
+
+ subqueries + + +
+ + +

List of subqueries in this scope.

+ +
For example:
+ +
+

SELECT * FROM x WHERE a IN (SELECT ...) <- that's a subquery

+
+ +
Returns:
+ +
+

list[exp.Subqueryable]: subqueries

+
+
+ + +
+
+
+ columns + + +
+ + +

List of columns in this scope.

+ +
Returns:
+ +
+

list[exp.Column]: Column instances in this scope, plus any + Columns that reference this scope from correlated subqueries.

+
+
+ + +
+
+
+ selected_sources + + +
+ + +

Mapping of nodes and sources that are actually selected from in this scope.

+ +

That is, all tables in a schema are selectable at any point. But a +table only becomes a selected source if it's included in a FROM or JOIN clause.

+ +
Returns:
+ +
+

dict[str, (exp.Table|exp.Select, exp.Table|Scope)]: selected sources and nodes

+
+
+ + +
+
+
+ cte_sources + + +
+ + +

Sources that are CTEs.

+ +
Returns:
+ +
+

dict[str, Scope]: Mapping of source alias to Scope

+
+
+ + +
+
+
+ selects + + +
+ + +

Select expressions of this scope.

+ +

For example, for the following expression: + SELECT 1 as a, 2 as b FROM x

+ +

The outputs are the "1 as a" and "2 as b" expressions.

+ +
Returns:
+ +
+

list[exp.Expression]: expressions

+
+
+ + +
+
+
+ external_columns + + +
+ + +

Columns that appear to reference sources in outer scopes.

+ +
Returns:
+ +
+

list[exp.Column]: Column instances that don't reference + sources in the current scope.

+
+
+ + +
+
+
+ unqualified_columns + + +
+ + +

Unqualified columns in the current scope.

+ +
Returns:
+ +
+

list[exp.Column]: Unqualified columns

+
+
+ + +
+
+
+ join_hints + + +
+ + +

Hints that exist in the scope that reference tables

+ +
Returns:
+ +
+

list[exp.JoinHint]: Join hints that are referenced within the scope

+
+
+ + +
+
+ +
+ + def + source_columns(self, source_name): + + + +
+ +
346    def source_columns(self, source_name):
+347        """
+348        Get all columns in the current scope for a particular source.
+349
+350        Args:
+351            source_name (str): Name of the source
+352        Returns:
+353            list[exp.Column]: Column instances that reference `source_name`
+354        """
+355        return [column for column in self.columns if column.table == source_name]
+
+ + +

Get all columns in the current scope for a particular source.

+ +
Arguments:
+ +
    +
  • source_name (str): Name of the source
  • +
+ +
Returns:
+ +
+

list[exp.Column]: Column instances that reference source_name

+
+
+ + +
+
+
+ is_subquery + + +
+ + +

Determine if this scope is a subquery

+
+ + +
+
+
+ is_derived_table + + +
+ + +

Determine if this scope is a derived table

+
+ + +
+
+
+ is_union + + +
+ + +

Determine if this scope is a union

+
+ + +
+
+
+ is_cte + + +
+ + +

Determine if this scope is a common table expression

+
+ + +
+
+
+ is_root + + +
+ + +

Determine if this is the root scope

+
+ + +
+
+
+ is_udtf + + +
+ + +

Determine if this scope is a UDTF (User Defined Table Function)

+
+ + +
+
+
+ is_correlated_subquery + + +
+ + +

Determine if this scope is a correlated subquery

+
+ + +
+
+ +
+ + def + rename_source(self, old_name, new_name): + + + +
+ +
392    def rename_source(self, old_name, new_name):
+393        """Rename a source in this scope"""
+394        columns = self.sources.pop(old_name or "", [])
+395        self.sources[new_name] = columns
+
+ + +

Rename a source in this scope

+
+ + +
+
+ +
+ + def + add_source(self, name, source): + + + +
+ +
397    def add_source(self, name, source):
+398        """Add a source to this scope"""
+399        self.sources[name] = source
+400        self.clear_cache()
+
+ + +

Add a source to this scope

+
+ + +
+
+ +
+ + def + remove_source(self, name): + + + +
+ +
402    def remove_source(self, name):
+403        """Remove a source from this scope"""
+404        self.sources.pop(name, None)
+405        self.clear_cache()
+
+ + +

Remove a source from this scope

+
+ + +
+
+ +
+ + def + traverse(self): + + + +
+ +
410    def traverse(self):
+411        """
+412        Traverse the scope tree from this node.
+413
+414        Yields:
+415            Scope: scope instances in depth-first-search post-order
+416        """
+417        for child_scope in itertools.chain(
+418            self.cte_scopes, self.union_scopes, self.derived_table_scopes, self.subquery_scopes
+419        ):
+420            yield from child_scope.traverse()
+421        yield self
+
+ + +

Traverse the scope tree from this node.

+ +
Yields:
+ +
+

Scope: scope instances in depth-first-search post-order

+
+
+ + +
+
+ +
+ + def + ref_count(self): + + + +
+ +
423    def ref_count(self):
+424        """
+425        Count the number of times each scope in this tree is referenced.
+426
+427        Returns:
+428            dict[int, int]: Mapping of Scope instance ID to reference count
+429        """
+430        scope_ref_count = defaultdict(lambda: 0)
+431
+432        for scope in self.traverse():
+433            for _, source in scope.selected_sources.values():
+434                scope_ref_count[id(source)] += 1
+435
+436        return scope_ref_count
+
+ + +

Count the number of times each scope in this tree is referenced.

+ +
Returns:
+ +
+

dict[int, int]: Mapping of Scope instance ID to reference count

+
+
+ + +
+
+
+ +
+ + def + traverse_scope(expression): + + + +
+ +
439def traverse_scope(expression):
+440    """
+441    Traverse an expression by it's "scopes".
+442
+443    "Scope" represents the current context of a Select statement.
+444
+445    This is helpful for optimizing queries, where we need more information than
+446    the expression tree itself. For example, we might care about the source
+447    names within a subquery. Returns a list because a generator could result in
+448    incomplete properties which is confusing.
+449
+450    Examples:
+451        >>> import sqlglot
+452        >>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
+453        >>> scopes = traverse_scope(expression)
+454        >>> scopes[0].expression.sql(), list(scopes[0].sources)
+455        ('SELECT a FROM x', ['x'])
+456        >>> scopes[1].expression.sql(), list(scopes[1].sources)
+457        ('SELECT a FROM (SELECT a FROM x) AS y', ['y'])
+458
+459    Args:
+460        expression (exp.Expression): expression to traverse
+461    Returns:
+462        list[Scope]: scope instances
+463    """
+464    return list(_traverse_scope(Scope(expression)))
+
+ + +

Traverse an expression by its "scopes".

+ +

"Scope" represents the current context of a Select statement.

+ +

This is helpful for optimizing queries, where we need more information than +the expression tree itself. For example, we might care about the source +names within a subquery. Returns a list because a generator could result in +incomplete properties which is confusing.

+ +
Examples:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("SELECT a FROM (SELECT a FROM x) AS y")
+>>> scopes = traverse_scope(expression)
+>>> scopes[0].expression.sql(), list(scopes[0].sources)
+('SELECT a FROM x', ['x'])
+>>> scopes[1].expression.sql(), list(scopes[1].sources)
+('SELECT a FROM (SELECT a FROM x) AS y', ['y'])
+
+
+
+ +
Arguments:
+ +
    +
  • expression (exp.Expression): expression to traverse
  • +
+ +
Returns:
+ +
+

list[Scope]: scope instances

+
+
+ + +
+
+ +
+ + def + build_scope(expression): + + + +
+ +
467def build_scope(expression):
+468    """
+469    Build a scope tree.
+470
+471    Args:
+472        expression (exp.Expression): expression to build the scope tree for
+473    Returns:
+474        Scope: root scope
+475    """
+476    return traverse_scope(expression)[-1]
+
+ + +

Build a scope tree.

+ +
Arguments:
+ +
    +
  • expression (exp.Expression): expression to build the scope tree for
  • +
+ +
Returns:
+ +
+

Scope: root scope

+
+
+ + +
+
+ +
+ + def + walk_in_scope(expression, bfs=True): + + + +
+ +
604def walk_in_scope(expression, bfs=True):
+605    """
+606    Returns a generator object which visits all nodes in the syntrax tree, stopping at
+607    nodes that start child scopes.
+608
+609    Args:
+610        expression (exp.Expression):
+611        bfs (bool): if set to True the BFS traversal order will be applied,
+612            otherwise the DFS traversal will be used instead.
+613
+614    Yields:
+615        tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key
+616    """
+617    # We'll use this variable to pass state into the dfs generator.
+618    # Whenever we set it to True, we exclude a subtree from traversal.
+619    prune = False
+620
+621    for node, parent, key in expression.walk(bfs=bfs, prune=lambda *_: prune):
+622        prune = False
+623
+624        yield node, parent, key
+625
+626        if node is expression:
+627            continue
+628        elif isinstance(node, exp.CTE):
+629            prune = True
+630        elif isinstance(node, exp.Subquery) and isinstance(parent, (exp.From, exp.Join)):
+631            prune = True
+632        elif isinstance(node, exp.Subqueryable):
+633            prune = True
+
+ + +

Returns a generator object which visits all nodes in the syntax tree, stopping at +nodes that start child scopes.

+ +
Arguments:
+ +
    +
  • expression (exp.Expression):
  • +
  • bfs (bool): if set to True the BFS traversal order will be applied, +otherwise the DFS traversal will be used instead.
  • +
+ +
Yields:
+ +
+

tuple[exp.Expression, Optional[exp.Expression], str]: node, parent, arg key

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/simplify.html b/docs/sqlglot/optimizer/simplify.html new file mode 100644 index 0000000..cc83534 --- /dev/null +++ b/docs/sqlglot/optimizer/simplify.html @@ -0,0 +1,1428 @@ + + + + + + + sqlglot.optimizer.simplify API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.simplify

+ + + + + + +
  1import datetime
+  2import functools
+  3import itertools
+  4from collections import deque
+  5from decimal import Decimal
+  6
+  7from sqlglot import exp
+  8from sqlglot.expressions import FALSE, NULL, TRUE
+  9from sqlglot.generator import Generator
+ 10from sqlglot.helper import first, while_changing
+ 11
+ 12GENERATOR = Generator(normalize=True, identify=True)
+ 13
+ 14
+ 15def simplify(expression):
+ 16    """
+ 17    Rewrite sqlglot AST to simplify expressions.
+ 18
+ 19    Example:
+ 20        >>> import sqlglot
+ 21        >>> expression = sqlglot.parse_one("TRUE AND TRUE")
+ 22        >>> simplify(expression).sql()
+ 23        'TRUE'
+ 24
+ 25    Args:
+ 26        expression (sqlglot.Expression): expression to simplify
+ 27    Returns:
+ 28        sqlglot.Expression: simplified expression
+ 29    """
+ 30
+ 31    def _simplify(expression, root=True):
+ 32        node = expression
+ 33        node = rewrite_between(node)
+ 34        node = uniq_sort(node)
+ 35        node = absorb_and_eliminate(node)
+ 36        exp.replace_children(node, lambda e: _simplify(e, False))
+ 37        node = simplify_not(node)
+ 38        node = flatten(node)
+ 39        node = simplify_connectors(node)
+ 40        node = remove_compliments(node)
+ 41        node.parent = expression.parent
+ 42        node = simplify_literals(node)
+ 43        node = simplify_parens(node)
+ 44        if root:
+ 45            expression.replace(node)
+ 46        return node
+ 47
+ 48    expression = while_changing(expression, _simplify)
+ 49    remove_where_true(expression)
+ 50    return expression
+ 51
+ 52
+ 53def rewrite_between(expression: exp.Expression) -> exp.Expression:
+ 54    """Rewrite x between y and z to x >= y AND x <= z.
+ 55
+ 56    This is done because comparison simplification is only done on lt/lte/gt/gte.
+ 57    """
+ 58    if isinstance(expression, exp.Between):
+ 59        return exp.and_(
+ 60            exp.GTE(this=expression.this.copy(), expression=expression.args["low"]),
+ 61            exp.LTE(this=expression.this.copy(), expression=expression.args["high"]),
+ 62        )
+ 63    return expression
+ 64
+ 65
+ 66def simplify_not(expression):
+ 67    """
+ 68    Demorgan's Law
+ 69    NOT (x OR y) -> NOT x AND NOT y
+ 70    NOT (x AND y) -> NOT x OR NOT y
+ 71    """
+ 72    if isinstance(expression, exp.Not):
+ 73        if isinstance(expression.this, exp.Null):
+ 74            return exp.null()
+ 75        if isinstance(expression.this, exp.Paren):
+ 76            condition = expression.this.unnest()
+ 77            if isinstance(condition, exp.And):
+ 78                return exp.or_(exp.not_(condition.left), exp.not_(condition.right))
+ 79            if isinstance(condition, exp.Or):
+ 80                return exp.and_(exp.not_(condition.left), exp.not_(condition.right))
+ 81            if isinstance(condition, exp.Null):
+ 82                return exp.null()
+ 83        if always_true(expression.this):
+ 84            return exp.false()
+ 85        if expression.this == FALSE:
+ 86            return exp.true()
+ 87        if isinstance(expression.this, exp.Not):
+ 88            # double negation
+ 89            # NOT NOT x -> x
+ 90            return expression.this.this
+ 91    return expression
+ 92
+ 93
+ 94def flatten(expression):
+ 95    """
+ 96    A AND (B AND C) -> A AND B AND C
+ 97    A OR (B OR C) -> A OR B OR C
+ 98    """
+ 99    if isinstance(expression, exp.Connector):
+100        for node in expression.args.values():
+101            child = node.unnest()
+102            if isinstance(child, expression.__class__):
+103                node.replace(child)
+104    return expression
+105
+106
+107def simplify_connectors(expression):
+108    def _simplify_connectors(expression, left, right):
+109        if isinstance(expression, exp.Connector):
+110            if left == right:
+111                return left
+112            if isinstance(expression, exp.And):
+113                if FALSE in (left, right):
+114                    return exp.false()
+115                if NULL in (left, right):
+116                    return exp.null()
+117                if always_true(left) and always_true(right):
+118                    return exp.true()
+119                if always_true(left):
+120                    return right
+121                if always_true(right):
+122                    return left
+123                return _simplify_comparison(expression, left, right)
+124            elif isinstance(expression, exp.Or):
+125                if always_true(left) or always_true(right):
+126                    return exp.true()
+127                if left == FALSE and right == FALSE:
+128                    return exp.false()
+129                if (
+130                    (left == NULL and right == NULL)
+131                    or (left == NULL and right == FALSE)
+132                    or (left == FALSE and right == NULL)
+133                ):
+134                    return exp.null()
+135                if left == FALSE:
+136                    return right
+137                if right == FALSE:
+138                    return left
+139                return _simplify_comparison(expression, left, right, or_=True)
+140        return None
+141
+142    return _flat_simplify(expression, _simplify_connectors)
+143
+144
+145LT_LTE = (exp.LT, exp.LTE)
+146GT_GTE = (exp.GT, exp.GTE)
+147
+148COMPARISONS = (
+149    *LT_LTE,
+150    *GT_GTE,
+151    exp.EQ,
+152    exp.NEQ,
+153)
+154
+155INVERSE_COMPARISONS = {
+156    exp.LT: exp.GT,
+157    exp.GT: exp.LT,
+158    exp.LTE: exp.GTE,
+159    exp.GTE: exp.LTE,
+160}
+161
+162
+163def _simplify_comparison(expression, left, right, or_=False):
+164    if isinstance(left, COMPARISONS) and isinstance(right, COMPARISONS):
+165        ll, lr = left.args.values()
+166        rl, rr = right.args.values()
+167
+168        largs = {ll, lr}
+169        rargs = {rl, rr}
+170
+171        matching = largs & rargs
+172        columns = {m for m in matching if isinstance(m, exp.Column)}
+173
+174        if matching and columns:
+175            try:
+176                l = first(largs - columns)
+177                r = first(rargs - columns)
+178            except StopIteration:
+179                return expression
+180
+181            # make sure the comparison is always of the form x > 1 instead of 1 < x
+182            if left.__class__ in INVERSE_COMPARISONS and l == ll:
+183                left = INVERSE_COMPARISONS[left.__class__](this=lr, expression=ll)
+184            if right.__class__ in INVERSE_COMPARISONS and r == rl:
+185                right = INVERSE_COMPARISONS[right.__class__](this=rr, expression=rl)
+186
+187            if l.is_number and r.is_number:
+188                l = float(l.name)
+189                r = float(r.name)
+190            elif l.is_string and r.is_string:
+191                l = l.name
+192                r = r.name
+193            else:
+194                return None
+195
+196            for (a, av), (b, bv) in itertools.permutations(((left, l), (right, r))):
+197                if isinstance(a, LT_LTE) and isinstance(b, LT_LTE):
+198                    return left if (av > bv if or_ else av <= bv) else right
+199                if isinstance(a, GT_GTE) and isinstance(b, GT_GTE):
+200                    return left if (av < bv if or_ else av >= bv) else right
+201
+202                # we can't ever shortcut to true because the column could be null
+203                if isinstance(a, exp.LT) and isinstance(b, GT_GTE):
+204                    if not or_ and av <= bv:
+205                        return exp.false()
+206                elif isinstance(a, exp.GT) and isinstance(b, LT_LTE):
+207                    if not or_ and av >= bv:
+208                        return exp.false()
+209                elif isinstance(a, exp.EQ):
+210                    if isinstance(b, exp.LT):
+211                        return exp.false() if av >= bv else a
+212                    if isinstance(b, exp.LTE):
+213                        return exp.false() if av > bv else a
+214                    if isinstance(b, exp.GT):
+215                        return exp.false() if av <= bv else a
+216                    if isinstance(b, exp.GTE):
+217                        return exp.false() if av < bv else a
+218                    if isinstance(b, exp.NEQ):
+219                        return exp.false() if av == bv else a
+220    return None
+221
+222
+223def remove_compliments(expression):
+224    """
+225    Removing compliments.
+226
+227    A AND NOT A -> FALSE
+228    A OR NOT A -> TRUE
+229    """
+230    if isinstance(expression, exp.Connector):
+231        compliment = exp.false() if isinstance(expression, exp.And) else exp.true()
+232
+233        for a, b in itertools.permutations(expression.flatten(), 2):
+234            if is_complement(a, b):
+235                return compliment
+236    return expression
+237
+238
+239def uniq_sort(expression):
+240    """
+241    Uniq and sort a connector.
+242
+243    C AND A AND B AND B -> A AND B AND C
+244    """
+245    if isinstance(expression, exp.Connector):
+246        result_func = exp.and_ if isinstance(expression, exp.And) else exp.or_
+247        flattened = tuple(expression.flatten())
+248        deduped = {GENERATOR.generate(e): e for e in flattened}
+249        arr = tuple(deduped.items())
+250
+251        # check if the operands are already sorted, if not sort them
+252        # A AND C AND B -> A AND B AND C
+253        for i, (sql, e) in enumerate(arr[1:]):
+254            if sql < arr[i][0]:
+255                expression = result_func(*(deduped[sql] for sql in sorted(deduped)))
+256                break
+257        else:
+258            # we didn't have to sort but maybe we need to dedup
+259            if len(deduped) < len(flattened):
+260                expression = result_func(*deduped.values())
+261
+262    return expression
+263
+264
+265def absorb_and_eliminate(expression):
+266    """
+267    absorption:
+268        A AND (A OR B) -> A
+269        A OR (A AND B) -> A
+270        A AND (NOT A OR B) -> A AND B
+271        A OR (NOT A AND B) -> A OR B
+272    elimination:
+273        (A AND B) OR (A AND NOT B) -> A
+274        (A OR B) AND (A OR NOT B) -> A
+275    """
+276    if isinstance(expression, exp.Connector):
+277        kind = exp.Or if isinstance(expression, exp.And) else exp.And
+278
+279        for a, b in itertools.permutations(expression.flatten(), 2):
+280            if isinstance(a, kind):
+281                aa, ab = a.unnest_operands()
+282
+283                # absorb
+284                if is_complement(b, aa):
+285                    aa.replace(exp.true() if kind == exp.And else exp.false())
+286                elif is_complement(b, ab):
+287                    ab.replace(exp.true() if kind == exp.And else exp.false())
+288                elif (set(b.flatten()) if isinstance(b, kind) else {b}) < set(a.flatten()):
+289                    a.replace(exp.false() if kind == exp.And else exp.true())
+290                elif isinstance(b, kind):
+291                    # eliminate
+292                    rhs = b.unnest_operands()
+293                    ba, bb = rhs
+294
+295                    if aa in rhs and (is_complement(ab, ba) or is_complement(ab, bb)):
+296                        a.replace(aa)
+297                        b.replace(aa)
+298                    elif ab in rhs and (is_complement(aa, ba) or is_complement(aa, bb)):
+299                        a.replace(ab)
+300                        b.replace(ab)
+301
+302    return expression
+303
+304
+305def simplify_literals(expression):
+306    if isinstance(expression, exp.Binary):
+307        return _flat_simplify(expression, _simplify_binary)
+308    elif isinstance(expression, exp.Neg):
+309        this = expression.this
+310        if this.is_number:
+311            value = this.name
+312            if value[0] == "-":
+313                return exp.Literal.number(value[1:])
+314            return exp.Literal.number(f"-{value}")
+315
+316    return expression
+317
+318
+319def _simplify_binary(expression, a, b):
+320    if isinstance(expression, exp.Is):
+321        if isinstance(b, exp.Not):
+322            c = b.this
+323            not_ = True
+324        else:
+325            c = b
+326            not_ = False
+327
+328        if c == NULL:
+329            if isinstance(a, exp.Literal):
+330                return exp.true() if not_ else exp.false()
+331            if a == NULL:
+332                return exp.false() if not_ else exp.true()
+333    elif isinstance(expression, (exp.NullSafeEQ, exp.NullSafeNEQ)):
+334        return None
+335    elif NULL in (a, b):
+336        return exp.null()
+337
+338    if a.is_number and b.is_number:
+339        a = int(a.name) if a.is_int else Decimal(a.name)
+340        b = int(b.name) if b.is_int else Decimal(b.name)
+341
+342        if isinstance(expression, exp.Add):
+343            return exp.Literal.number(a + b)
+344        if isinstance(expression, exp.Sub):
+345            return exp.Literal.number(a - b)
+346        if isinstance(expression, exp.Mul):
+347            return exp.Literal.number(a * b)
+348        if isinstance(expression, exp.Div):
+349            if isinstance(a, int) and isinstance(b, int):
+350                return exp.Literal.number(a // b)
+351            return exp.Literal.number(a / b)
+352
+353        boolean = eval_boolean(expression, a, b)
+354
+355        if boolean:
+356            return boolean
+357    elif a.is_string and b.is_string:
+358        boolean = eval_boolean(expression, a, b)
+359
+360        if boolean:
+361            return boolean
+362    elif isinstance(a, exp.Cast) and isinstance(b, exp.Interval):
+363        a, b = extract_date(a), extract_interval(b)
+364        if a and b:
+365            if isinstance(expression, exp.Add):
+366                return date_literal(a + b)
+367            if isinstance(expression, exp.Sub):
+368                return date_literal(a - b)
+369    elif isinstance(a, exp.Interval) and isinstance(b, exp.Cast):
+370        a, b = extract_interval(a), extract_date(b)
+371        # you cannot subtract a date from an interval
+372        if a and b and isinstance(expression, exp.Add):
+373            return date_literal(a + b)
+374
+375    return None
+376
+377
+378def simplify_parens(expression):
+379    if (
+380        isinstance(expression, exp.Paren)
+381        and not isinstance(expression.this, exp.Select)
+382        and (
+383            not isinstance(expression.parent, (exp.Condition, exp.Binary))
+384            or isinstance(expression.this, (exp.Is, exp.Like))
+385            or not isinstance(expression.this, exp.Binary)
+386        )
+387    ):
+388        return expression.this
+389    return expression
+390
+391
+392def remove_where_true(expression):
+393    for where in expression.find_all(exp.Where):
+394        if always_true(where.this):
+395            where.parent.set("where", None)
+396    for join in expression.find_all(exp.Join):
+397        if always_true(join.args.get("on")):
+398            join.set("kind", "CROSS")
+399            join.set("on", None)
+400
+401
+402def always_true(expression):
+403    return expression == TRUE or isinstance(expression, exp.Literal)
+404
+405
+406def is_complement(a, b):
+407    return isinstance(b, exp.Not) and b.this == a
+408
+409
+410def eval_boolean(expression, a, b):
+411    if isinstance(expression, (exp.EQ, exp.Is)):
+412        return boolean_literal(a == b)
+413    if isinstance(expression, exp.NEQ):
+414        return boolean_literal(a != b)
+415    if isinstance(expression, exp.GT):
+416        return boolean_literal(a > b)
+417    if isinstance(expression, exp.GTE):
+418        return boolean_literal(a >= b)
+419    if isinstance(expression, exp.LT):
+420        return boolean_literal(a < b)
+421    if isinstance(expression, exp.LTE):
+422        return boolean_literal(a <= b)
+423    return None
+424
+425
+426def extract_date(cast):
+427    # The "fromisoformat" conversion could fail if the cast is used on an identifier,
+428    # so in that case we can't extract the date.
+429    try:
+430        if cast.args["to"].this == exp.DataType.Type.DATE:
+431            return datetime.date.fromisoformat(cast.name)
+432        if cast.args["to"].this == exp.DataType.Type.DATETIME:
+433            return datetime.datetime.fromisoformat(cast.name)
+434    except ValueError:
+435        return None
+436
+437
+438def extract_interval(interval):
+439    try:
+440        from dateutil.relativedelta import relativedelta  # type: ignore
+441    except ModuleNotFoundError:
+442        return None
+443
+444    n = int(interval.name)
+445    unit = interval.text("unit").lower()
+446
+447    if unit == "year":
+448        return relativedelta(years=n)
+449    if unit == "month":
+450        return relativedelta(months=n)
+451    if unit == "week":
+452        return relativedelta(weeks=n)
+453    if unit == "day":
+454        return relativedelta(days=n)
+455    return None
+456
+457
+458def date_literal(date):
+459    return exp.cast(
+460        exp.Literal.string(date),
+461        "DATETIME" if isinstance(date, datetime.datetime) else "DATE",
+462    )
+463
+464
+465def boolean_literal(condition):
+466    return exp.true() if condition else exp.false()
+467
+468
+469def _flat_simplify(expression, simplifier):
+470    operands = []
+471    queue = deque(expression.flatten(unnest=False))
+472    size = len(queue)
+473
+474    while queue:
+475        a = queue.popleft()
+476
+477        for b in queue:
+478            result = simplifier(expression, a, b)
+479
+480            if result:
+481                queue.remove(b)
+482                queue.append(result)
+483                break
+484        else:
+485            operands.append(a)
+486
+487    if len(operands) < size:
+488        return functools.reduce(lambda a, b: expression.__class__(this=a, expression=b), operands)
+489    return expression
+
+ + +
+
+ +
+ + def + simplify(expression): + + + +
+ +
16def simplify(expression):
+17    """
+18    Rewrite sqlglot AST to simplify expressions.
+19
+20    Example:
+21        >>> import sqlglot
+22        >>> expression = sqlglot.parse_one("TRUE AND TRUE")
+23        >>> simplify(expression).sql()
+24        'TRUE'
+25
+26    Args:
+27        expression (sqlglot.Expression): expression to simplify
+28    Returns:
+29        sqlglot.Expression: simplified expression
+30    """
+31
+32    def _simplify(expression, root=True):
+33        node = expression
+34        node = rewrite_between(node)
+35        node = uniq_sort(node)
+36        node = absorb_and_eliminate(node)
+37        exp.replace_children(node, lambda e: _simplify(e, False))
+38        node = simplify_not(node)
+39        node = flatten(node)
+40        node = simplify_connectors(node)
+41        node = remove_compliments(node)
+42        node.parent = expression.parent
+43        node = simplify_literals(node)
+44        node = simplify_parens(node)
+45        if root:
+46            expression.replace(node)
+47        return node
+48
+49    expression = while_changing(expression, _simplify)
+50    remove_where_true(expression)
+51    return expression
+
+ + +

Rewrite sqlglot AST to simplify expressions.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("TRUE AND TRUE")
+>>> simplify(expression).sql()
+'TRUE'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to simplify
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: simplified expression

+
+
+ + +
+
+ +
+ + def + rewrite_between( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
54def rewrite_between(expression: exp.Expression) -> exp.Expression:
+55    """Rewrite x between y and z to x >= y AND x <= z.
+56
+57    This is done because comparison simplification is only done on lt/lte/gt/gte.
+58    """
+59    if isinstance(expression, exp.Between):
+60        return exp.and_(
+61            exp.GTE(this=expression.this.copy(), expression=expression.args["low"]),
+62            exp.LTE(this=expression.this.copy(), expression=expression.args["high"]),
+63        )
+64    return expression
+
+ + +

Rewrite x between y and z to x >= y AND x <= z.

+ +

This is done because comparison simplification is only done on lt/lte/gt/gte.

+
+ + +
+
+ +
+ + def + simplify_not(expression): + + + +
+ +
67def simplify_not(expression):
+68    """
+69    Demorgan's Law
+70    NOT (x OR y) -> NOT x AND NOT y
+71    NOT (x AND y) -> NOT x OR NOT y
+72    """
+73    if isinstance(expression, exp.Not):
+74        if isinstance(expression.this, exp.Null):
+75            return exp.null()
+76        if isinstance(expression.this, exp.Paren):
+77            condition = expression.this.unnest()
+78            if isinstance(condition, exp.And):
+79                return exp.or_(exp.not_(condition.left), exp.not_(condition.right))
+80            if isinstance(condition, exp.Or):
+81                return exp.and_(exp.not_(condition.left), exp.not_(condition.right))
+82            if isinstance(condition, exp.Null):
+83                return exp.null()
+84        if always_true(expression.this):
+85            return exp.false()
+86        if expression.this == FALSE:
+87            return exp.true()
+88        if isinstance(expression.this, exp.Not):
+89            # double negation
+90            # NOT NOT x -> x
+91            return expression.this.this
+92    return expression
+
+ + +

Demorgan's Law +NOT (x OR y) -> NOT x AND NOT y +NOT (x AND y) -> NOT x OR NOT y

+
+ + +
+
+ +
+ + def + flatten(expression): + + + +
+ +
 95def flatten(expression):
+ 96    """
+ 97    A AND (B AND C) -> A AND B AND C
+ 98    A OR (B OR C) -> A OR B OR C
+ 99    """
+100    if isinstance(expression, exp.Connector):
+101        for node in expression.args.values():
+102            child = node.unnest()
+103            if isinstance(child, expression.__class__):
+104                node.replace(child)
+105    return expression
+
+ + +

A AND (B AND C) -> A AND B AND C +A OR (B OR C) -> A OR B OR C

+
+ + +
+
+ +
+ + def + simplify_connectors(expression): + + + +
+ +
108def simplify_connectors(expression):
+109    def _simplify_connectors(expression, left, right):
+110        if isinstance(expression, exp.Connector):
+111            if left == right:
+112                return left
+113            if isinstance(expression, exp.And):
+114                if FALSE in (left, right):
+115                    return exp.false()
+116                if NULL in (left, right):
+117                    return exp.null()
+118                if always_true(left) and always_true(right):
+119                    return exp.true()
+120                if always_true(left):
+121                    return right
+122                if always_true(right):
+123                    return left
+124                return _simplify_comparison(expression, left, right)
+125            elif isinstance(expression, exp.Or):
+126                if always_true(left) or always_true(right):
+127                    return exp.true()
+128                if left == FALSE and right == FALSE:
+129                    return exp.false()
+130                if (
+131                    (left == NULL and right == NULL)
+132                    or (left == NULL and right == FALSE)
+133                    or (left == FALSE and right == NULL)
+134                ):
+135                    return exp.null()
+136                if left == FALSE:
+137                    return right
+138                if right == FALSE:
+139                    return left
+140                return _simplify_comparison(expression, left, right, or_=True)
+141        return None
+142
+143    return _flat_simplify(expression, _simplify_connectors)
+
+ + + + +
+
+ +
+ + def + remove_compliments(expression): + + + +
+ +
224def remove_compliments(expression):
+225    """
+226    Removing compliments.
+227
+228    A AND NOT A -> FALSE
+229    A OR NOT A -> TRUE
+230    """
+231    if isinstance(expression, exp.Connector):
+232        compliment = exp.false() if isinstance(expression, exp.And) else exp.true()
+233
+234        for a, b in itertools.permutations(expression.flatten(), 2):
+235            if is_complement(a, b):
+236                return compliment
+237    return expression
+
+ + +

Removing compliments.

+ +

A AND NOT A -> FALSE +A OR NOT A -> TRUE

+
+ + +
+
+ +
+ + def + uniq_sort(expression): + + + +
+ +
240def uniq_sort(expression):
+241    """
+242    Uniq and sort a connector.
+243
+244    C AND A AND B AND B -> A AND B AND C
+245    """
+246    if isinstance(expression, exp.Connector):
+247        result_func = exp.and_ if isinstance(expression, exp.And) else exp.or_
+248        flattened = tuple(expression.flatten())
+249        deduped = {GENERATOR.generate(e): e for e in flattened}
+250        arr = tuple(deduped.items())
+251
+252        # check if the operands are already sorted, if not sort them
+253        # A AND C AND B -> A AND B AND C
+254        for i, (sql, e) in enumerate(arr[1:]):
+255            if sql < arr[i][0]:
+256                expression = result_func(*(deduped[sql] for sql in sorted(deduped)))
+257                break
+258        else:
+259            # we didn't have to sort but maybe we need to dedup
+260            if len(deduped) < len(flattened):
+261                expression = result_func(*deduped.values())
+262
+263    return expression
+
+ + +

Uniq and sort a connector.

+ +

C AND A AND B AND B -> A AND B AND C

+
+ + +
+
+ +
+ + def + absorb_and_eliminate(expression): + + + +
+ +
266def absorb_and_eliminate(expression):
+267    """
+268    absorption:
+269        A AND (A OR B) -> A
+270        A OR (A AND B) -> A
+271        A AND (NOT A OR B) -> A AND B
+272        A OR (NOT A AND B) -> A OR B
+273    elimination:
+274        (A AND B) OR (A AND NOT B) -> A
+275        (A OR B) AND (A OR NOT B) -> A
+276    """
+277    if isinstance(expression, exp.Connector):
+278        kind = exp.Or if isinstance(expression, exp.And) else exp.And
+279
+280        for a, b in itertools.permutations(expression.flatten(), 2):
+281            if isinstance(a, kind):
+282                aa, ab = a.unnest_operands()
+283
+284                # absorb
+285                if is_complement(b, aa):
+286                    aa.replace(exp.true() if kind == exp.And else exp.false())
+287                elif is_complement(b, ab):
+288                    ab.replace(exp.true() if kind == exp.And else exp.false())
+289                elif (set(b.flatten()) if isinstance(b, kind) else {b}) < set(a.flatten()):
+290                    a.replace(exp.false() if kind == exp.And else exp.true())
+291                elif isinstance(b, kind):
+292                    # eliminate
+293                    rhs = b.unnest_operands()
+294                    ba, bb = rhs
+295
+296                    if aa in rhs and (is_complement(ab, ba) or is_complement(ab, bb)):
+297                        a.replace(aa)
+298                        b.replace(aa)
+299                    elif ab in rhs and (is_complement(aa, ba) or is_complement(aa, bb)):
+300                        a.replace(ab)
+301                        b.replace(ab)
+302
+303    return expression
+
+ + +

absorption: + A AND (A OR B) -> A + A OR (A AND B) -> A + A AND (NOT A OR B) -> A AND B + A OR (NOT A AND B) -> A OR B +elimination: + (A AND B) OR (A AND NOT B) -> A + (A OR B) AND (A OR NOT B) -> A

+
+ + +
+
+ +
+ + def + simplify_literals(expression): + + + +
+ +
306def simplify_literals(expression):
+307    if isinstance(expression, exp.Binary):
+308        return _flat_simplify(expression, _simplify_binary)
+309    elif isinstance(expression, exp.Neg):
+310        this = expression.this
+311        if this.is_number:
+312            value = this.name
+313            if value[0] == "-":
+314                return exp.Literal.number(value[1:])
+315            return exp.Literal.number(f"-{value}")
+316
+317    return expression
+
+ + + + +
+
+ +
+ + def + simplify_parens(expression): + + + +
+ +
379def simplify_parens(expression):
+380    if (
+381        isinstance(expression, exp.Paren)
+382        and not isinstance(expression.this, exp.Select)
+383        and (
+384            not isinstance(expression.parent, (exp.Condition, exp.Binary))
+385            or isinstance(expression.this, (exp.Is, exp.Like))
+386            or not isinstance(expression.this, exp.Binary)
+387        )
+388    ):
+389        return expression.this
+390    return expression
+
+ + + + +
+
+ +
+ + def + remove_where_true(expression): + + + +
+ +
393def remove_where_true(expression):
+394    for where in expression.find_all(exp.Where):
+395        if always_true(where.this):
+396            where.parent.set("where", None)
+397    for join in expression.find_all(exp.Join):
+398        if always_true(join.args.get("on")):
+399            join.set("kind", "CROSS")
+400            join.set("on", None)
+
+ + + + +
+
+ +
+ + def + always_true(expression): + + + +
+ +
403def always_true(expression):
+404    return expression == TRUE or isinstance(expression, exp.Literal)
+
+ + + + +
+
+ +
+ + def + is_complement(a, b): + + + +
+ +
407def is_complement(a, b):
+408    return isinstance(b, exp.Not) and b.this == a
+
+ + + + +
+
+ +
+ + def + eval_boolean(expression, a, b): + + + +
+ +
411def eval_boolean(expression, a, b):
+412    if isinstance(expression, (exp.EQ, exp.Is)):
+413        return boolean_literal(a == b)
+414    if isinstance(expression, exp.NEQ):
+415        return boolean_literal(a != b)
+416    if isinstance(expression, exp.GT):
+417        return boolean_literal(a > b)
+418    if isinstance(expression, exp.GTE):
+419        return boolean_literal(a >= b)
+420    if isinstance(expression, exp.LT):
+421        return boolean_literal(a < b)
+422    if isinstance(expression, exp.LTE):
+423        return boolean_literal(a <= b)
+424    return None
+
+ + + + +
+
+ +
+ + def + extract_date(cast): + + + +
+ +
427def extract_date(cast):
+428    # The "fromisoformat" conversion could fail if the cast is used on an identifier,
+429    # so in that case we can't extract the date.
+430    try:
+431        if cast.args["to"].this == exp.DataType.Type.DATE:
+432            return datetime.date.fromisoformat(cast.name)
+433        if cast.args["to"].this == exp.DataType.Type.DATETIME:
+434            return datetime.datetime.fromisoformat(cast.name)
+435    except ValueError:
+436        return None
+
+ + + + +
+
+ +
+ + def + extract_interval(interval): + + + +
+ +
439def extract_interval(interval):
+440    try:
+441        from dateutil.relativedelta import relativedelta  # type: ignore
+442    except ModuleNotFoundError:
+443        return None
+444
+445    n = int(interval.name)
+446    unit = interval.text("unit").lower()
+447
+448    if unit == "year":
+449        return relativedelta(years=n)
+450    if unit == "month":
+451        return relativedelta(months=n)
+452    if unit == "week":
+453        return relativedelta(weeks=n)
+454    if unit == "day":
+455        return relativedelta(days=n)
+456    return None
+
+ + + + +
+
+ +
+ + def + date_literal(date): + + + +
+ +
459def date_literal(date):
+460    return exp.cast(
+461        exp.Literal.string(date),
+462        "DATETIME" if isinstance(date, datetime.datetime) else "DATE",
+463    )
+
+ + + + +
+
+ +
+ + def + boolean_literal(condition): + + + +
+ +
466def boolean_literal(condition):
+467    return exp.true() if condition else exp.false()
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/optimizer/unnest_subqueries.html b/docs/sqlglot/optimizer/unnest_subqueries.html new file mode 100644 index 0000000..0102e43 --- /dev/null +++ b/docs/sqlglot/optimizer/unnest_subqueries.html @@ -0,0 +1,835 @@ + + + + + + + sqlglot.optimizer.unnest_subqueries API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.optimizer.unnest_subqueries

+ + + + + + +
  1import itertools
+  2
+  3from sqlglot import exp
+  4from sqlglot.optimizer.scope import ScopeType, traverse_scope
+  5
+  6
+  7def unnest_subqueries(expression):
+  8    """
+  9    Rewrite sqlglot AST to convert some predicates with subqueries into joins.
+ 10
+ 11    Convert scalar subqueries into cross joins.
+ 12    Convert correlated or vectorized subqueries into a group by so it is not a many to many left join.
+ 13
+ 14    Example:
+ 15        >>> import sqlglot
+ 16        >>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")
+ 17        >>> unnest_subqueries(expression).sql()
+ 18        'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'
+ 19
+ 20    Args:
+ 21        expression (sqlglot.Expression): expression to unnest
+ 22    Returns:
+ 23        sqlglot.Expression: unnested expression
+ 24    """
+ 25    sequence = itertools.count()
+ 26
+ 27    for scope in traverse_scope(expression):
+ 28        select = scope.expression
+ 29        parent = select.parent_select
+ 30        if not parent:
+ 31            continue
+ 32        if scope.external_columns:
+ 33            decorrelate(select, parent, scope.external_columns, sequence)
+ 34        elif scope.scope_type == ScopeType.SUBQUERY:
+ 35            unnest(select, parent, sequence)
+ 36
+ 37    return expression
+ 38
+ 39
+ 40def unnest(select, parent_select, sequence):
+ 41    if len(select.selects) > 1:
+ 42        return
+ 43
+ 44    predicate = select.find_ancestor(exp.Condition)
+ 45    alias = _alias(sequence)
+ 46
+ 47    if not predicate or parent_select is not predicate.parent_select:
+ 48        return
+ 49
+ 50    # this subquery returns a scalar and can just be converted to a cross join
+ 51    if not isinstance(predicate, (exp.In, exp.Any)):
+ 52        having = predicate.find_ancestor(exp.Having)
+ 53        column = exp.column(select.selects[0].alias_or_name, alias)
+ 54        if having and having.parent_select is parent_select:
+ 55            column = exp.Max(this=column)
+ 56        _replace(select.parent, column)
+ 57
+ 58        parent_select.join(
+ 59            select,
+ 60            join_type="CROSS",
+ 61            join_alias=alias,
+ 62            copy=False,
+ 63        )
+ 64        return
+ 65
+ 66    if select.find(exp.Limit, exp.Offset):
+ 67        return
+ 68
+ 69    if isinstance(predicate, exp.Any):
+ 70        predicate = predicate.find_ancestor(exp.EQ)
+ 71
+ 72        if not predicate or parent_select is not predicate.parent_select:
+ 73            return
+ 74
+ 75    column = _other_operand(predicate)
+ 76    value = select.selects[0]
+ 77
+ 78    on = exp.condition(f'{column} = "{alias}"."{value.alias}"')
+ 79    _replace(predicate, f"NOT {on.right} IS NULL")
+ 80
+ 81    parent_select.join(
+ 82        select.group_by(value.this, copy=False),
+ 83        on=on,
+ 84        join_type="LEFT",
+ 85        join_alias=alias,
+ 86        copy=False,
+ 87    )
+ 88
+ 89
+ 90def decorrelate(select, parent_select, external_columns, sequence):
+ 91    where = select.args.get("where")
+ 92
+ 93    if not where or where.find(exp.Or) or select.find(exp.Limit, exp.Offset):
+ 94        return
+ 95
+ 96    table_alias = _alias(sequence)
+ 97    keys = []
+ 98
+ 99    # for all external columns in the where statement, find the relevant predicate
+100    # keys to convert it into a join
+101    for column in external_columns:
+102        if column.find_ancestor(exp.Where) is not where:
+103            return
+104
+105        predicate = column.find_ancestor(exp.Predicate)
+106
+107        if not predicate or predicate.find_ancestor(exp.Where) is not where:
+108            return
+109
+110        if isinstance(predicate, exp.Binary):
+111            key = (
+112                predicate.right
+113                if any(node is column for node, *_ in predicate.left.walk())
+114                else predicate.left
+115            )
+116        else:
+117            return
+118
+119        keys.append((key, column, predicate))
+120
+121    if not any(isinstance(predicate, exp.EQ) for *_, predicate in keys):
+122        return
+123
+124    is_subquery_projection = any(
+125        node is select.parent for node in parent_select.selects if isinstance(node, exp.Subquery)
+126    )
+127
+128    value = select.selects[0]
+129    key_aliases = {}
+130    group_by = []
+131
+132    for key, _, predicate in keys:
+133        # if we filter on the value of the subquery, it needs to be unique
+134        if key == value.this:
+135            key_aliases[key] = value.alias
+136            group_by.append(key)
+137        else:
+138            if key not in key_aliases:
+139                key_aliases[key] = _alias(sequence)
+140            # all predicates that are equalities must also be in the unique
+141            # so that we don't do a many to many join
+142            if isinstance(predicate, exp.EQ) and key not in group_by:
+143                group_by.append(key)
+144
+145    parent_predicate = select.find_ancestor(exp.Predicate)
+146
+147    # if the value of the subquery is not an agg or a key, we need to collect it into an array
+148    # so that it can be grouped. For subquery projections, we use a MAX aggregation instead.
+149    agg_func = exp.Max if is_subquery_projection else exp.ArrayAgg
+150    if not value.find(exp.AggFunc) and value.this not in group_by:
+151        select.select(
+152            exp.alias_(agg_func(this=value.this), value.alias, quoted=False),
+153            append=False,
+154            copy=False,
+155        )
+156
+157    # exists queries should not have any selects as it only checks if there are any rows
+158    # all selects will be added by the optimizer and only used for join keys
+159    if isinstance(parent_predicate, exp.Exists):
+160        select.args["expressions"] = []
+161
+162    for key, alias in key_aliases.items():
+163        if key in group_by:
+164            # add all keys to the projections of the subquery
+165            # so that we can use it as a join key
+166            if isinstance(parent_predicate, exp.Exists) or key != value.this:
+167                select.select(f"{key} AS {alias}", copy=False)
+168        else:
+169            select.select(exp.alias_(agg_func(this=key.copy()), alias, quoted=False), copy=False)
+170
+171    alias = exp.column(value.alias, table_alias)
+172    other = _other_operand(parent_predicate)
+173
+174    if isinstance(parent_predicate, exp.Exists):
+175        alias = exp.column(list(key_aliases.values())[0], table_alias)
+176        parent_predicate = _replace(parent_predicate, f"NOT {alias} IS NULL")
+177    elif isinstance(parent_predicate, exp.All):
+178        parent_predicate = _replace(
+179            parent_predicate.parent, f"ARRAY_ALL({alias}, _x -> _x = {other})"
+180        )
+181    elif isinstance(parent_predicate, exp.Any):
+182        if value.this in group_by:
+183            parent_predicate = _replace(parent_predicate.parent, f"{other} = {alias}")
+184        else:
+185            parent_predicate = _replace(parent_predicate, f"ARRAY_ANY({alias}, _x -> _x = {other})")
+186    elif isinstance(parent_predicate, exp.In):
+187        if value.this in group_by:
+188            parent_predicate = _replace(parent_predicate, f"{other} = {alias}")
+189        else:
+190            parent_predicate = _replace(
+191                parent_predicate,
+192                f"ARRAY_ANY({alias}, _x -> _x = {parent_predicate.this})",
+193            )
+194    else:
+195        if is_subquery_projection:
+196            alias = exp.alias_(alias, select.parent.alias)
+197
+198        # COUNT always returns 0 on empty datasets, so we need take that into consideration here
+199        # by transforming all counts into 0 and using that as the coalesced value
+200        if value.find(exp.Count):
+201
+202            def remove_aggs(node):
+203                if isinstance(node, exp.Count):
+204                    return exp.Literal.number(0)
+205                elif isinstance(node, exp.AggFunc):
+206                    return exp.null()
+207                return node
+208
+209            alias = exp.Coalesce(
+210                this=alias,
+211                expressions=[value.this.transform(remove_aggs)],
+212            )
+213
+214        select.parent.replace(alias)
+215
+216    for key, column, predicate in keys:
+217        predicate.replace(exp.true())
+218        nested = exp.column(key_aliases[key], table_alias)
+219
+220        if is_subquery_projection:
+221            key.replace(nested)
+222            continue
+223
+224        if key in group_by:
+225            key.replace(nested)
+226        elif isinstance(predicate, exp.EQ):
+227            parent_predicate = _replace(
+228                parent_predicate,
+229                f"({parent_predicate} AND ARRAY_CONTAINS({nested}, {column}))",
+230            )
+231        else:
+232            key.replace(exp.to_identifier("_x"))
+233            parent_predicate = _replace(
+234                parent_predicate,
+235                f'({parent_predicate} AND ARRAY_ANY({nested}, "_x" -> {predicate}))',
+236            )
+237
+238    parent_select.join(
+239        select.group_by(*group_by, copy=False),
+240        on=[predicate for *_, predicate in keys if isinstance(predicate, exp.EQ)],
+241        join_type="LEFT",
+242        join_alias=table_alias,
+243        copy=False,
+244    )
+245
+246
+247def _alias(sequence):
+248    return f"_u_{next(sequence)}"
+249
+250
+251def _replace(expression, condition):
+252    return expression.replace(exp.condition(condition))
+253
+254
+255def _other_operand(expression):
+256    if isinstance(expression, exp.In):
+257        return expression.this
+258
+259    if isinstance(expression, (exp.Any, exp.All)):
+260        return _other_operand(expression.parent)
+261
+262    if isinstance(expression, exp.Binary):
+263        return (
+264            expression.right
+265            if isinstance(expression.left, (exp.Subquery, exp.Any, exp.Exists, exp.All))
+266            else expression.left
+267        )
+268
+269    return None
+
+ + +
+
+ +
+ + def + unnest_subqueries(expression): + + + +
+ +
 8def unnest_subqueries(expression):
+ 9    """
+10    Rewrite sqlglot AST to convert some predicates with subqueries into joins.
+11
+12    Convert scalar subqueries into cross joins.
+13    Convert correlated or vectorized subqueries into a group by so it is not a many to many left join.
+14
+15    Example:
+16        >>> import sqlglot
+17        >>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")
+18        >>> unnest_subqueries(expression).sql()
+19        'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'
+20
+21    Args:
+22        expression (sqlglot.Expression): expression to unnest
+23    Returns:
+24        sqlglot.Expression: unnested expression
+25    """
+26    sequence = itertools.count()
+27
+28    for scope in traverse_scope(expression):
+29        select = scope.expression
+30        parent = select.parent_select
+31        if not parent:
+32            continue
+33        if scope.external_columns:
+34            decorrelate(select, parent, scope.external_columns, sequence)
+35        elif scope.scope_type == ScopeType.SUBQUERY:
+36            unnest(select, parent, sequence)
+37
+38    return expression
+
+ + +

Rewrite sqlglot AST to convert some predicates with subqueries into joins.

+ +

Convert scalar subqueries into cross joins. +Convert correlated or vectorized subqueries into a group by so it is not a many to many left join.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> expression = sqlglot.parse_one("SELECT * FROM x AS x WHERE (SELECT y.a AS a FROM y AS y WHERE x.a = y.a) = 1 ")
+>>> unnest_subqueries(expression).sql()
+'SELECT * FROM x AS x LEFT JOIN (SELECT y.a AS a FROM y AS y WHERE TRUE GROUP BY y.a) AS _u_0 ON x.a = _u_0.a WHERE _u_0.a = 1'
+
+
+
+ +
Arguments:
+ +
    +
  • expression (sqlglot.Expression): expression to unnest
  • +
+ +
Returns:
+ +
+

sqlglot.Expression: unnested expression

+
+
+ + +
+
+ +
+ + def + unnest(select, parent_select, sequence): + + + +
+ +
41def unnest(select, parent_select, sequence):
+42    if len(select.selects) > 1:
+43        return
+44
+45    predicate = select.find_ancestor(exp.Condition)
+46    alias = _alias(sequence)
+47
+48    if not predicate or parent_select is not predicate.parent_select:
+49        return
+50
+51    # this subquery returns a scalar and can just be converted to a cross join
+52    if not isinstance(predicate, (exp.In, exp.Any)):
+53        having = predicate.find_ancestor(exp.Having)
+54        column = exp.column(select.selects[0].alias_or_name, alias)
+55        if having and having.parent_select is parent_select:
+56            column = exp.Max(this=column)
+57        _replace(select.parent, column)
+58
+59        parent_select.join(
+60            select,
+61            join_type="CROSS",
+62            join_alias=alias,
+63            copy=False,
+64        )
+65        return
+66
+67    if select.find(exp.Limit, exp.Offset):
+68        return
+69
+70    if isinstance(predicate, exp.Any):
+71        predicate = predicate.find_ancestor(exp.EQ)
+72
+73        if not predicate or parent_select is not predicate.parent_select:
+74            return
+75
+76    column = _other_operand(predicate)
+77    value = select.selects[0]
+78
+79    on = exp.condition(f'{column} = "{alias}"."{value.alias}"')
+80    _replace(predicate, f"NOT {on.right} IS NULL")
+81
+82    parent_select.join(
+83        select.group_by(value.this, copy=False),
+84        on=on,
+85        join_type="LEFT",
+86        join_alias=alias,
+87        copy=False,
+88    )
+
+ + + + +
+
+ +
+ + def + decorrelate(select, parent_select, external_columns, sequence): + + + +
+ +
 91def decorrelate(select, parent_select, external_columns, sequence):
+ 92    where = select.args.get("where")
+ 93
+ 94    if not where or where.find(exp.Or) or select.find(exp.Limit, exp.Offset):
+ 95        return
+ 96
+ 97    table_alias = _alias(sequence)
+ 98    keys = []
+ 99
+100    # for all external columns in the where statement, find the relevant predicate
+101    # keys to convert it into a join
+102    for column in external_columns:
+103        if column.find_ancestor(exp.Where) is not where:
+104            return
+105
+106        predicate = column.find_ancestor(exp.Predicate)
+107
+108        if not predicate or predicate.find_ancestor(exp.Where) is not where:
+109            return
+110
+111        if isinstance(predicate, exp.Binary):
+112            key = (
+113                predicate.right
+114                if any(node is column for node, *_ in predicate.left.walk())
+115                else predicate.left
+116            )
+117        else:
+118            return
+119
+120        keys.append((key, column, predicate))
+121
+122    if not any(isinstance(predicate, exp.EQ) for *_, predicate in keys):
+123        return
+124
+125    is_subquery_projection = any(
+126        node is select.parent for node in parent_select.selects if isinstance(node, exp.Subquery)
+127    )
+128
+129    value = select.selects[0]
+130    key_aliases = {}
+131    group_by = []
+132
+133    for key, _, predicate in keys:
+134        # if we filter on the value of the subquery, it needs to be unique
+135        if key == value.this:
+136            key_aliases[key] = value.alias
+137            group_by.append(key)
+138        else:
+139            if key not in key_aliases:
+140                key_aliases[key] = _alias(sequence)
+141            # all predicates that are equalities must also be in the unique
+142            # so that we don't do a many to many join
+143            if isinstance(predicate, exp.EQ) and key not in group_by:
+144                group_by.append(key)
+145
+146    parent_predicate = select.find_ancestor(exp.Predicate)
+147
+148    # if the value of the subquery is not an agg or a key, we need to collect it into an array
+149    # so that it can be grouped. For subquery projections, we use a MAX aggregation instead.
+150    agg_func = exp.Max if is_subquery_projection else exp.ArrayAgg
+151    if not value.find(exp.AggFunc) and value.this not in group_by:
+152        select.select(
+153            exp.alias_(agg_func(this=value.this), value.alias, quoted=False),
+154            append=False,
+155            copy=False,
+156        )
+157
+158    # exists queries should not have any selects as it only checks if there are any rows
+159    # all selects will be added by the optimizer and only used for join keys
+160    if isinstance(parent_predicate, exp.Exists):
+161        select.args["expressions"] = []
+162
+163    for key, alias in key_aliases.items():
+164        if key in group_by:
+165            # add all keys to the projections of the subquery
+166            # so that we can use it as a join key
+167            if isinstance(parent_predicate, exp.Exists) or key != value.this:
+168                select.select(f"{key} AS {alias}", copy=False)
+169        else:
+170            select.select(exp.alias_(agg_func(this=key.copy()), alias, quoted=False), copy=False)
+171
+172    alias = exp.column(value.alias, table_alias)
+173    other = _other_operand(parent_predicate)
+174
+175    if isinstance(parent_predicate, exp.Exists):
+176        alias = exp.column(list(key_aliases.values())[0], table_alias)
+177        parent_predicate = _replace(parent_predicate, f"NOT {alias} IS NULL")
+178    elif isinstance(parent_predicate, exp.All):
+179        parent_predicate = _replace(
+180            parent_predicate.parent, f"ARRAY_ALL({alias}, _x -> _x = {other})"
+181        )
+182    elif isinstance(parent_predicate, exp.Any):
+183        if value.this in group_by:
+184            parent_predicate = _replace(parent_predicate.parent, f"{other} = {alias}")
+185        else:
+186            parent_predicate = _replace(parent_predicate, f"ARRAY_ANY({alias}, _x -> _x = {other})")
+187    elif isinstance(parent_predicate, exp.In):
+188        if value.this in group_by:
+189            parent_predicate = _replace(parent_predicate, f"{other} = {alias}")
+190        else:
+191            parent_predicate = _replace(
+192                parent_predicate,
+193                f"ARRAY_ANY({alias}, _x -> _x = {parent_predicate.this})",
+194            )
+195    else:
+196        if is_subquery_projection:
+197            alias = exp.alias_(alias, select.parent.alias)
+198
+199        # COUNT always returns 0 on empty datasets, so we need take that into consideration here
+200        # by transforming all counts into 0 and using that as the coalesced value
+201        if value.find(exp.Count):
+202
+203            def remove_aggs(node):
+204                if isinstance(node, exp.Count):
+205                    return exp.Literal.number(0)
+206                elif isinstance(node, exp.AggFunc):
+207                    return exp.null()
+208                return node
+209
+210            alias = exp.Coalesce(
+211                this=alias,
+212                expressions=[value.this.transform(remove_aggs)],
+213            )
+214
+215        select.parent.replace(alias)
+216
+217    for key, column, predicate in keys:
+218        predicate.replace(exp.true())
+219        nested = exp.column(key_aliases[key], table_alias)
+220
+221        if is_subquery_projection:
+222            key.replace(nested)
+223            continue
+224
+225        if key in group_by:
+226            key.replace(nested)
+227        elif isinstance(predicate, exp.EQ):
+228            parent_predicate = _replace(
+229                parent_predicate,
+230                f"({parent_predicate} AND ARRAY_CONTAINS({nested}, {column}))",
+231            )
+232        else:
+233            key.replace(exp.to_identifier("_x"))
+234            parent_predicate = _replace(
+235                parent_predicate,
+236                f'({parent_predicate} AND ARRAY_ANY({nested}, "_x" -> {predicate}))',
+237            )
+238
+239    parent_select.join(
+240        select.group_by(*group_by, copy=False),
+241        on=[predicate for *_, predicate in keys if isinstance(predicate, exp.EQ)],
+242        join_type="LEFT",
+243        join_alias=table_alias,
+244        copy=False,
+245    )
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/parser.html b/docs/sqlglot/parser.html new file mode 100644 index 0000000..28aa800 --- /dev/null +++ b/docs/sqlglot/parser.html @@ -0,0 +1,8049 @@ + + + + + + + sqlglot.parser API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.parser

+ + + + + + +
   1from __future__ import annotations
+   2
+   3import logging
+   4import typing as t
+   5
+   6from sqlglot import exp
+   7from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
+   8from sqlglot.helper import (
+   9    apply_index_offset,
+  10    count_params,
+  11    ensure_collection,
+  12    ensure_list,
+  13    seq_get,
+  14)
+  15from sqlglot.tokens import Token, Tokenizer, TokenType
+  16from sqlglot.trie import in_trie, new_trie
+  17
+  18logger = logging.getLogger("sqlglot")
+  19
+  20
+  21def parse_var_map(args):
+  22    keys = []
+  23    values = []
+  24    for i in range(0, len(args), 2):
+  25        keys.append(args[i])
+  26        values.append(args[i + 1])
+  27    return exp.VarMap(
+  28        keys=exp.Array(expressions=keys),
+  29        values=exp.Array(expressions=values),
+  30    )
+  31
+  32
+  33class _Parser(type):
+  34    def __new__(cls, clsname, bases, attrs):
+  35        klass = super().__new__(cls, clsname, bases, attrs)
+  36        klass._show_trie = new_trie(key.split(" ") for key in klass.SHOW_PARSERS)
+  37        klass._set_trie = new_trie(key.split(" ") for key in klass.SET_PARSERS)
+  38        return klass
+  39
+  40
+  41class Parser(metaclass=_Parser):
+  42    """
+  43    Parser consumes a list of tokens produced by the `sqlglot.tokens.Tokenizer` and produces
+  44    a parsed syntax tree.
+  45
+  46    Args:
+  47        error_level: the desired error level.
+  48            Default: ErrorLevel.RAISE
+  49        error_message_context: determines the amount of context to capture from a
+  50            query string when displaying the error message (in number of characters).
+  51            Default: 50.
+  52        index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
+  53            Default: 0
+  54        alias_post_tablesample: If the table alias comes after tablesample.
+  55            Default: False
+  56        max_errors: Maximum number of error messages to include in a raised ParseError.
+  57            This is only relevant if error_level is ErrorLevel.RAISE.
+  58            Default: 3
+  59        null_ordering: Indicates the default null ordering method to use if not explicitly set.
+  60            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
+  61            Default: "nulls_are_small"
+  62    """
+  63
+  64    FUNCTIONS: t.Dict[str, t.Callable] = {
+  65        **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()},
+  66        "DATE_TO_DATE_STR": lambda args: exp.Cast(
+  67            this=seq_get(args, 0),
+  68            to=exp.DataType(this=exp.DataType.Type.TEXT),
+  69        ),
+  70        "TIME_TO_TIME_STR": lambda args: exp.Cast(
+  71            this=seq_get(args, 0),
+  72            to=exp.DataType(this=exp.DataType.Type.TEXT),
+  73        ),
+  74        "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring(
+  75            this=exp.Cast(
+  76                this=seq_get(args, 0),
+  77                to=exp.DataType(this=exp.DataType.Type.TEXT),
+  78            ),
+  79            start=exp.Literal.number(1),
+  80            length=exp.Literal.number(10),
+  81        ),
+  82        "VAR_MAP": parse_var_map,
+  83        "IFNULL": exp.Coalesce.from_arg_list,
+  84    }
+  85
+  86    NO_PAREN_FUNCTIONS = {
+  87        TokenType.CURRENT_DATE: exp.CurrentDate,
+  88        TokenType.CURRENT_DATETIME: exp.CurrentDate,
+  89        TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp,
+  90    }
+  91
+  92    NESTED_TYPE_TOKENS = {
+  93        TokenType.ARRAY,
+  94        TokenType.MAP,
+  95        TokenType.STRUCT,
+  96        TokenType.NULLABLE,
+  97    }
+  98
+  99    TYPE_TOKENS = {
+ 100        TokenType.BOOLEAN,
+ 101        TokenType.TINYINT,
+ 102        TokenType.SMALLINT,
+ 103        TokenType.INT,
+ 104        TokenType.BIGINT,
+ 105        TokenType.FLOAT,
+ 106        TokenType.DOUBLE,
+ 107        TokenType.CHAR,
+ 108        TokenType.NCHAR,
+ 109        TokenType.VARCHAR,
+ 110        TokenType.NVARCHAR,
+ 111        TokenType.TEXT,
+ 112        TokenType.MEDIUMTEXT,
+ 113        TokenType.LONGTEXT,
+ 114        TokenType.MEDIUMBLOB,
+ 115        TokenType.LONGBLOB,
+ 116        TokenType.BINARY,
+ 117        TokenType.VARBINARY,
+ 118        TokenType.JSON,
+ 119        TokenType.JSONB,
+ 120        TokenType.INTERVAL,
+ 121        TokenType.TIME,
+ 122        TokenType.TIMESTAMP,
+ 123        TokenType.TIMESTAMPTZ,
+ 124        TokenType.TIMESTAMPLTZ,
+ 125        TokenType.DATETIME,
+ 126        TokenType.DATE,
+ 127        TokenType.DECIMAL,
+ 128        TokenType.UUID,
+ 129        TokenType.GEOGRAPHY,
+ 130        TokenType.GEOMETRY,
+ 131        TokenType.HLLSKETCH,
+ 132        TokenType.HSTORE,
+ 133        TokenType.PSEUDO_TYPE,
+ 134        TokenType.SUPER,
+ 135        TokenType.SERIAL,
+ 136        TokenType.SMALLSERIAL,
+ 137        TokenType.BIGSERIAL,
+ 138        TokenType.XML,
+ 139        TokenType.UNIQUEIDENTIFIER,
+ 140        TokenType.MONEY,
+ 141        TokenType.SMALLMONEY,
+ 142        TokenType.ROWVERSION,
+ 143        TokenType.IMAGE,
+ 144        TokenType.VARIANT,
+ 145        TokenType.OBJECT,
+ 146        *NESTED_TYPE_TOKENS,
+ 147    }
+ 148
+ 149    SUBQUERY_PREDICATES = {
+ 150        TokenType.ANY: exp.Any,
+ 151        TokenType.ALL: exp.All,
+ 152        TokenType.EXISTS: exp.Exists,
+ 153        TokenType.SOME: exp.Any,
+ 154    }
+ 155
+ 156    RESERVED_KEYWORDS = {*Tokenizer.SINGLE_TOKENS.values(), TokenType.SELECT}
+ 157
+ 158    ID_VAR_TOKENS = {
+ 159        TokenType.VAR,
+ 160        TokenType.ALWAYS,
+ 161        TokenType.ANTI,
+ 162        TokenType.APPLY,
+ 163        TokenType.AUTO_INCREMENT,
+ 164        TokenType.BEGIN,
+ 165        TokenType.BOTH,
+ 166        TokenType.BUCKET,
+ 167        TokenType.CACHE,
+ 168        TokenType.CASCADE,
+ 169        TokenType.COLLATE,
+ 170        TokenType.COLUMN,
+ 171        TokenType.COMMAND,
+ 172        TokenType.COMMIT,
+ 173        TokenType.COMPOUND,
+ 174        TokenType.CONSTRAINT,
+ 175        TokenType.CURRENT_TIME,
+ 176        TokenType.DEFAULT,
+ 177        TokenType.DELETE,
+ 178        TokenType.DESCRIBE,
+ 179        TokenType.DIV,
+ 180        TokenType.END,
+ 181        TokenType.EXECUTE,
+ 182        TokenType.ESCAPE,
+ 183        TokenType.FALSE,
+ 184        TokenType.FIRST,
+ 185        TokenType.FILTER,
+ 186        TokenType.FOLLOWING,
+ 187        TokenType.FORMAT,
+ 188        TokenType.FUNCTION,
+ 189        TokenType.GENERATED,
+ 190        TokenType.IDENTITY,
+ 191        TokenType.IF,
+ 192        TokenType.INDEX,
+ 193        TokenType.ISNULL,
+ 194        TokenType.INTERVAL,
+ 195        TokenType.LAZY,
+ 196        TokenType.LEADING,
+ 197        TokenType.LEFT,
+ 198        TokenType.LOCAL,
+ 199        TokenType.MATERIALIZED,
+ 200        TokenType.MERGE,
+ 201        TokenType.NATURAL,
+ 202        TokenType.NEXT,
+ 203        TokenType.OFFSET,
+ 204        TokenType.ONLY,
+ 205        TokenType.OPTIONS,
+ 206        TokenType.ORDINALITY,
+ 207        TokenType.PERCENT,
+ 208        TokenType.PIVOT,
+ 209        TokenType.PRECEDING,
+ 210        TokenType.RANGE,
+ 211        TokenType.REFERENCES,
+ 212        TokenType.RIGHT,
+ 213        TokenType.ROW,
+ 214        TokenType.ROWS,
+ 215        TokenType.SCHEMA,
+ 216        TokenType.SCHEMA_COMMENT,
+ 217        TokenType.SEED,
+ 218        TokenType.SEMI,
+ 219        TokenType.SET,
+ 220        TokenType.SHOW,
+ 221        TokenType.SORTKEY,
+ 222        TokenType.TABLE,
+ 223        TokenType.TEMPORARY,
+ 224        TokenType.TOP,
+ 225        TokenType.TRAILING,
+ 226        TokenType.TRUE,
+ 227        TokenType.UNBOUNDED,
+ 228        TokenType.UNIQUE,
+ 229        TokenType.UNLOGGED,
+ 230        TokenType.UNPIVOT,
+ 231        TokenType.PROCEDURE,
+ 232        TokenType.VIEW,
+ 233        TokenType.VOLATILE,
+ 234        TokenType.WINDOW,
+ 235        *SUBQUERY_PREDICATES,
+ 236        *TYPE_TOKENS,
+ 237        *NO_PAREN_FUNCTIONS,
+ 238    }
+ 239
+ 240    TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
+ 241        TokenType.APPLY,
+ 242        TokenType.LEFT,
+ 243        TokenType.NATURAL,
+ 244        TokenType.OFFSET,
+ 245        TokenType.RIGHT,
+ 246        TokenType.WINDOW,
+ 247    }
+ 248
+ 249    UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET}
+ 250
+ 251    TRIM_TYPES = {TokenType.LEADING, TokenType.TRAILING, TokenType.BOTH}
+ 252
+ 253    FUNC_TOKENS = {
+ 254        TokenType.COMMAND,
+ 255        TokenType.CURRENT_DATE,
+ 256        TokenType.CURRENT_DATETIME,
+ 257        TokenType.CURRENT_TIMESTAMP,
+ 258        TokenType.CURRENT_TIME,
+ 259        TokenType.FILTER,
+ 260        TokenType.FIRST,
+ 261        TokenType.FORMAT,
+ 262        TokenType.IDENTIFIER,
+ 263        TokenType.INDEX,
+ 264        TokenType.ISNULL,
+ 265        TokenType.ILIKE,
+ 266        TokenType.LIKE,
+ 267        TokenType.MERGE,
+ 268        TokenType.OFFSET,
+ 269        TokenType.PRIMARY_KEY,
+ 270        TokenType.REPLACE,
+ 271        TokenType.ROW,
+ 272        TokenType.UNNEST,
+ 273        TokenType.VAR,
+ 274        TokenType.LEFT,
+ 275        TokenType.RIGHT,
+ 276        TokenType.DATE,
+ 277        TokenType.DATETIME,
+ 278        TokenType.TABLE,
+ 279        TokenType.TIMESTAMP,
+ 280        TokenType.TIMESTAMPTZ,
+ 281        TokenType.WINDOW,
+ 282        *TYPE_TOKENS,
+ 283        *SUBQUERY_PREDICATES,
+ 284    }
+ 285
+ 286    CONJUNCTION = {
+ 287        TokenType.AND: exp.And,
+ 288        TokenType.OR: exp.Or,
+ 289    }
+ 290
+ 291    EQUALITY = {
+ 292        TokenType.EQ: exp.EQ,
+ 293        TokenType.NEQ: exp.NEQ,
+ 294        TokenType.NULLSAFE_EQ: exp.NullSafeEQ,
+ 295    }
+ 296
+ 297    COMPARISON = {
+ 298        TokenType.GT: exp.GT,
+ 299        TokenType.GTE: exp.GTE,
+ 300        TokenType.LT: exp.LT,
+ 301        TokenType.LTE: exp.LTE,
+ 302    }
+ 303
+ 304    BITWISE = {
+ 305        TokenType.AMP: exp.BitwiseAnd,
+ 306        TokenType.CARET: exp.BitwiseXor,
+ 307        TokenType.PIPE: exp.BitwiseOr,
+ 308        TokenType.DPIPE: exp.DPipe,
+ 309    }
+ 310
+ 311    TERM = {
+ 312        TokenType.DASH: exp.Sub,
+ 313        TokenType.PLUS: exp.Add,
+ 314        TokenType.MOD: exp.Mod,
+ 315        TokenType.COLLATE: exp.Collate,
+ 316    }
+ 317
+ 318    FACTOR = {
+ 319        TokenType.DIV: exp.IntDiv,
+ 320        TokenType.LR_ARROW: exp.Distance,
+ 321        TokenType.SLASH: exp.Div,
+ 322        TokenType.STAR: exp.Mul,
+ 323    }
+ 324
+ 325    TIMESTAMPS = {
+ 326        TokenType.TIME,
+ 327        TokenType.TIMESTAMP,
+ 328        TokenType.TIMESTAMPTZ,
+ 329        TokenType.TIMESTAMPLTZ,
+ 330    }
+ 331
+ 332    SET_OPERATIONS = {
+ 333        TokenType.UNION,
+ 334        TokenType.INTERSECT,
+ 335        TokenType.EXCEPT,
+ 336    }
+ 337
+ 338    JOIN_SIDES = {
+ 339        TokenType.LEFT,
+ 340        TokenType.RIGHT,
+ 341        TokenType.FULL,
+ 342    }
+ 343
+ 344    JOIN_KINDS = {
+ 345        TokenType.INNER,
+ 346        TokenType.OUTER,
+ 347        TokenType.CROSS,
+ 348        TokenType.SEMI,
+ 349        TokenType.ANTI,
+ 350    }
+ 351
+ 352    LAMBDAS = {
+ 353        TokenType.ARROW: lambda self, expressions: self.expression(
+ 354            exp.Lambda,
+ 355            this=self._parse_conjunction().transform(
+ 356                self._replace_lambda, {node.name for node in expressions}
+ 357            ),
+ 358            expressions=expressions,
+ 359        ),
+ 360        TokenType.FARROW: lambda self, expressions: self.expression(
+ 361            exp.Kwarg,
+ 362            this=exp.Var(this=expressions[0].name),
+ 363            expression=self._parse_conjunction(),
+ 364        ),
+ 365    }
+ 366
+ 367    COLUMN_OPERATORS = {
+ 368        TokenType.DOT: None,
+ 369        TokenType.DCOLON: lambda self, this, to: self.expression(
+ 370            exp.Cast,
+ 371            this=this,
+ 372            to=to,
+ 373        ),
+ 374        TokenType.ARROW: lambda self, this, path: self.expression(
+ 375            exp.JSONExtract,
+ 376            this=this,
+ 377            expression=path,
+ 378        ),
+ 379        TokenType.DARROW: lambda self, this, path: self.expression(
+ 380            exp.JSONExtractScalar,
+ 381            this=this,
+ 382            expression=path,
+ 383        ),
+ 384        TokenType.HASH_ARROW: lambda self, this, path: self.expression(
+ 385            exp.JSONBExtract,
+ 386            this=this,
+ 387            expression=path,
+ 388        ),
+ 389        TokenType.DHASH_ARROW: lambda self, this, path: self.expression(
+ 390            exp.JSONBExtractScalar,
+ 391            this=this,
+ 392            expression=path,
+ 393        ),
+ 394        TokenType.PLACEHOLDER: lambda self, this, key: self.expression(
+ 395            exp.JSONBContains,
+ 396            this=this,
+ 397            expression=key,
+ 398        ),
+ 399    }
+ 400
+ 401    EXPRESSION_PARSERS = {
+ 402        exp.Column: lambda self: self._parse_column(),
+ 403        exp.DataType: lambda self: self._parse_types(),
+ 404        exp.From: lambda self: self._parse_from(),
+ 405        exp.Group: lambda self: self._parse_group(),
+ 406        exp.Identifier: lambda self: self._parse_id_var(),
+ 407        exp.Lateral: lambda self: self._parse_lateral(),
+ 408        exp.Join: lambda self: self._parse_join(),
+ 409        exp.Order: lambda self: self._parse_order(),
+ 410        exp.Cluster: lambda self: self._parse_sort(TokenType.CLUSTER_BY, exp.Cluster),
+ 411        exp.Sort: lambda self: self._parse_sort(TokenType.SORT_BY, exp.Sort),
+ 412        exp.Lambda: lambda self: self._parse_lambda(),
+ 413        exp.Limit: lambda self: self._parse_limit(),
+ 414        exp.Offset: lambda self: self._parse_offset(),
+ 415        exp.TableAlias: lambda self: self._parse_table_alias(),
+ 416        exp.Table: lambda self: self._parse_table(),
+ 417        exp.Condition: lambda self: self._parse_conjunction(),
+ 418        exp.Expression: lambda self: self._parse_statement(),
+ 419        exp.Properties: lambda self: self._parse_properties(),
+ 420        exp.Where: lambda self: self._parse_where(),
+ 421        exp.Ordered: lambda self: self._parse_ordered(),
+ 422        exp.Having: lambda self: self._parse_having(),
+ 423        exp.With: lambda self: self._parse_with(),
+ 424        exp.Window: lambda self: self._parse_named_window(),
+ 425        "JOIN_TYPE": lambda self: self._parse_join_side_and_kind(),
+ 426    }
+ 427
+ 428    STATEMENT_PARSERS = {
+ 429        TokenType.ALTER: lambda self: self._parse_alter(),
+ 430        TokenType.BEGIN: lambda self: self._parse_transaction(),
+ 431        TokenType.CACHE: lambda self: self._parse_cache(),
+ 432        TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
+ 433        TokenType.CREATE: lambda self: self._parse_create(),
+ 434        TokenType.DELETE: lambda self: self._parse_delete(),
+ 435        TokenType.DESC: lambda self: self._parse_describe(),
+ 436        TokenType.DESCRIBE: lambda self: self._parse_describe(),
+ 437        TokenType.DROP: lambda self: self._parse_drop(),
+ 438        TokenType.END: lambda self: self._parse_commit_or_rollback(),
+ 439        TokenType.INSERT: lambda self: self._parse_insert(),
+ 440        TokenType.LOAD_DATA: lambda self: self._parse_load_data(),
+ 441        TokenType.MERGE: lambda self: self._parse_merge(),
+ 442        TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
+ 443        TokenType.UNCACHE: lambda self: self._parse_uncache(),
+ 444        TokenType.UPDATE: lambda self: self._parse_update(),
+ 445        TokenType.USE: lambda self: self.expression(
+ 446            exp.Use,
+ 447            kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
+ 448            and exp.Var(this=self._prev.text),
+ 449            this=self._parse_table(schema=False),
+ 450        ),
+ 451    }
+ 452
    # Tokens that begin a unary expression -> parser for the operand.
    UNARY_PARSERS = {
        TokenType.PLUS: lambda self: self._parse_unary(),  # Unary + is handled as a no-op
        TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
        TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
        TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
    }

    # Tokens that form a primary (literal-like) expression. Each parser also
    # receives the matched token so it can read the token's text.
    PRIMARY_PARSERS = {
        TokenType.STRING: lambda self, token: self.expression(
            exp.Literal, this=token.text, is_string=True
        ),
        TokenType.NUMBER: lambda self, token: self.expression(
            exp.Literal, this=token.text, is_string=False
        ),
        TokenType.STAR: lambda self, _: self.expression(
            exp.Star,
            # "except" is a Python keyword, hence the dict-splat instead of kwargs.
            **{"except": self._parse_except(), "replace": self._parse_replace()},
        ),
        TokenType.NULL: lambda self, _: self.expression(exp.Null),
        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
        TokenType.NATIONAL: lambda self, token: self._parse_national(token),
        TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
    }

    # Placeholder / bind-parameter syntax (e.g. ?, @var, :name or :1).
    PLACEHOLDER_PARSERS = {
        TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
        TokenType.PARAMETER: lambda self: self.expression(
            exp.Parameter, this=self._parse_var() or self._parse_primary()
        ),
        # ":" only forms a placeholder when followed by a number or identifier.
        TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
        if self._match_set((TokenType.NUMBER, TokenType.VAR))
        else None,
    }

    # Range/comparison-like binary operators (BETWEEN, IN, LIKE, ...). Each
    # parser receives the already-parsed left-hand side as `this`.
    RANGE_PARSERS = {
        TokenType.BETWEEN: lambda self, this: self._parse_between(this),
        TokenType.GLOB: lambda self, this: self._parse_escape(
            self.expression(exp.Glob, this=this, expression=self._parse_bitwise())
        ),
        TokenType.IN: lambda self, this: self._parse_in(this),
        TokenType.IS: lambda self, this: self._parse_is(this),
        TokenType.LIKE: lambda self, this: self._parse_escape(
            self.expression(exp.Like, this=this, expression=self._parse_bitwise())
        ),
        TokenType.ILIKE: lambda self, this: self._parse_escape(
            self.expression(exp.ILike, this=this, expression=self._parse_bitwise())
        ),
        TokenType.IRLIKE: lambda self, this: self.expression(
            exp.RegexpILike, this=this, expression=self._parse_bitwise()
        ),
        TokenType.RLIKE: lambda self, this: self.expression(
            exp.RegexpLike, this=this, expression=self._parse_bitwise()
        ),
        TokenType.SIMILAR_TO: lambda self, this: self.expression(
            exp.SimilarTo, this=this, expression=self._parse_bitwise()
        ),
    }
+ 515
    # CREATE-statement property keywords -> parser. Keys are uppercased keyword
    # TEXT (not token types). Several handlers read self._prev to recover a
    # NO/DUAL/DEFAULT modifier that the dispatcher consumed just before them.
    PROPERTY_PARSERS = {
        "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
        "CHARACTER SET": lambda self: self._parse_character_set(),
        "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
        "PARTITION BY": lambda self: self._parse_partitioned_by(),
        "PARTITIONED BY": lambda self: self._parse_partitioned_by(),
        "PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
        "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
        "STORED": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
        "DISTKEY": lambda self: self._parse_distkey(),
        "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty),
        "SORTKEY": lambda self: self._parse_sortkey(),
        "LIKE": lambda self: self._parse_create_like(),
        "RETURNS": lambda self: self._parse_returns(),
        "ROW": lambda self: self._parse_row(),
        "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty),
        "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
        "TABLE_FORMAT": lambda self: self._parse_property_assignment(exp.TableFormatProperty),
        "USING": lambda self: self._parse_property_assignment(exp.TableFormatProperty),
        "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
        "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty),
        # DETERMINISTIC is normalized to the IMMUTABLE volatility level.
        "DETERMINISTIC": lambda self: self.expression(
            exp.VolatilityProperty, this=exp.Literal.string("IMMUTABLE")
        ),
        "IMMUTABLE": lambda self: self.expression(
            exp.VolatilityProperty, this=exp.Literal.string("IMMUTABLE")
        ),
        "STABLE": lambda self: self.expression(
            exp.VolatilityProperty, this=exp.Literal.string("STABLE")
        ),
        "VOLATILE": lambda self: self.expression(
            exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
        ),
        "WITH": lambda self: self._parse_with_property(),
        "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property),
        # The entries below are Teradata-style table options; the dispatcher has
        # already consumed the NO/DUAL/DEFAULT prefix, found in self._prev.
        "FALLBACK": lambda self: self._parse_fallback(no=self._prev.text.upper() == "NO"),
        "LOG": lambda self: self._parse_log(no=self._prev.text.upper() == "NO"),
        "BEFORE": lambda self: self._parse_journal(
            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
        ),
        "JOURNAL": lambda self: self._parse_journal(
            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
        ),
        "AFTER": lambda self: self._parse_afterjournal(
            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
        ),
        "LOCAL": lambda self: self._parse_afterjournal(no=False, dual=False, local=True),
        "NOT": lambda self: self._parse_afterjournal(no=False, dual=False, local=False),
        "CHECKSUM": lambda self: self._parse_checksum(),
        "FREESPACE": lambda self: self._parse_freespace(),
        "MERGEBLOCKRATIO": lambda self: self._parse_mergeblockratio(
            no=self._prev.text.upper() == "NO", default=self._prev.text.upper() == "DEFAULT"
        ),
        "MIN": lambda self: self._parse_datablocksize(),
        "MINIMUM": lambda self: self._parse_datablocksize(),
        "MAX": lambda self: self._parse_datablocksize(),
        "MAXIMUM": lambda self: self._parse_datablocksize(),
        "DATABLOCKSIZE": lambda self: self._parse_datablocksize(
            default=self._prev.text.upper() == "DEFAULT"
        ),
        "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
        "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
        "DEFINER": lambda self: self._parse_definer(),
    }
+ 580
    # Column/table constraint tokens -> parser.
    CONSTRAINT_PARSERS = {
        TokenType.CHECK: lambda self: self.expression(
            exp.Check, this=self._parse_wrapped(self._parse_conjunction)
        ),
        TokenType.FOREIGN_KEY: lambda self: self._parse_foreign_key(),
        TokenType.UNIQUE: lambda self: self._parse_unique(),
        TokenType.LIKE: lambda self: self._parse_create_like(),
    }

    # Function-like constructs whose syntax has no parentheses (CASE, IF).
    NO_PAREN_FUNCTION_PARSERS = {
        TokenType.CASE: lambda self: self._parse_case(),
        TokenType.IF: lambda self: self._parse_if(),
    }

    # Functions with non-standard argument syntax, keyed by function name.
    FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
        "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
        "TRY_CONVERT": lambda self: self._parse_convert(False),
        "EXTRACT": lambda self: self._parse_extract(),
        "POSITION": lambda self: self._parse_position(),
        "SUBSTRING": lambda self: self._parse_substring(),
        "TRIM": lambda self: self._parse_trim(),
        "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
        "TRY_CAST": lambda self: self._parse_cast(False),
        "STRING_AGG": lambda self: self._parse_string_agg(),
    }

    # Query-modifier clause name -> parser; consumed after the main SELECT body.
    QUERY_MODIFIER_PARSERS = {
        "match": lambda self: self._parse_match_recognize(),
        "where": lambda self: self._parse_where(),
        "group": lambda self: self._parse_group(),
        "having": lambda self: self._parse_having(),
        "qualify": lambda self: self._parse_qualify(),
        "windows": lambda self: self._parse_window_clause(),
        "distribute": lambda self: self._parse_sort(TokenType.DISTRIBUTE_BY, exp.Distribute),
        "sort": lambda self: self._parse_sort(TokenType.SORT_BY, exp.Sort),
        "cluster": lambda self: self._parse_sort(TokenType.CLUSTER_BY, exp.Cluster),
        "order": lambda self: self._parse_order(),
        "limit": lambda self: self._parse_limit(),
        "offset": lambda self: self._parse_offset(),
        "lock": lambda self: self._parse_lock(),
    }

    # Dialect hooks for SHOW / SET statements; populated by dialect subclasses.
    SHOW_PARSERS: t.Dict[str, t.Callable] = {}
    SET_PARSERS: t.Dict[str, t.Callable] = {}

    # Expression types that can carry query modifiers (WHERE, LIMIT, ...).
    MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table)

    # Object kinds accepted by CREATE / DROP.
    CREATABLES = {
        TokenType.COLUMN,
        TokenType.FUNCTION,
        TokenType.INDEX,
        TokenType.PROCEDURE,
        TokenType.SCHEMA,
        TokenType.TABLE,
        TokenType.VIEW,
    }

    # Valid transaction modes for BEGIN (SQLite-style keywords).
    TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}

    # Tokens usable as a named-window alias; ROWS is excluded because it would
    # be ambiguous with the window frame clause.
    WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}

    # Tokens that may follow ALTER TABLE ... ADD.
    ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}

    # Whether plain CAST has strict (raising) semantics; dialects may override.
    STRICT_CAST = True

    # __slots__ keeps per-instance memory low and catches attribute typos.
    __slots__ = (
        "error_level",
        "error_message_context",
        "sql",
        "errors",
        "index_offset",
        "unnest_column_only",
        "alias_post_tablesample",
        "max_errors",
        "null_ordering",
        "_tokens",
        "_index",
        "_curr",
        "_next",
        "_prev",
        "_prev_comments",
        "_show_trie",
        "_set_trie",
    )
+ 665
    def __init__(
        self,
        error_level: t.Optional[ErrorLevel] = None,
        error_message_context: int = 100,
        index_offset: int = 0,
        unnest_column_only: bool = False,
        alias_post_tablesample: bool = False,
        max_errors: int = 3,
        null_ordering: t.Optional[str] = None,
    ):
        """
        Args:
            error_level: how parse errors are surfaced; defaults to ErrorLevel.IMMEDIATE.
            error_message_context: number of characters of surrounding SQL included
                on each side of an error message highlight.
            index_offset: dialect base offset for indexing (0- or 1-based — see dialect use; TODO confirm).
            unnest_column_only: dialect flag controlling UNNEST alias semantics.
            alias_post_tablesample: whether the table alias appears after TABLESAMPLE.
            max_errors: maximum number of error messages concatenated into a raised ParseError.
            null_ordering: dialect default NULL-ordering behavior (opaque string; passed through).
        """
        self.error_level = error_level or ErrorLevel.IMMEDIATE
        self.error_message_context = error_message_context
        self.index_offset = index_offset
        self.unnest_column_only = unnest_column_only
        self.alias_post_tablesample = alias_post_tablesample
        self.max_errors = max_errors
        self.null_ordering = null_ordering
        # Initialize all mutable per-parse state (sql, errors, cursor fields).
        self.reset()
+ 684
+ 685    def reset(self):
+ 686        self.sql = ""
+ 687        self.errors = []
+ 688        self._tokens = []
+ 689        self._index = 0
+ 690        self._curr = None
+ 691        self._next = None
+ 692        self._prev = None
+ 693        self._prev_comments = None
+ 694
+ 695    def parse(
+ 696        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
+ 697    ) -> t.List[t.Optional[exp.Expression]]:
+ 698        """
+ 699        Parses a list of tokens and returns a list of syntax trees, one tree
+ 700        per parsed SQL statement.
+ 701
+ 702        Args:
+ 703            raw_tokens: the list of tokens.
+ 704            sql: the original SQL string, used to produce helpful debug messages.
+ 705
+ 706        Returns:
+ 707            The list of syntax trees.
+ 708        """
+ 709        return self._parse(
+ 710            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
+ 711        )
+ 712
+ 713    def parse_into(
+ 714        self,
+ 715        expression_types: exp.IntoType,
+ 716        raw_tokens: t.List[Token],
+ 717        sql: t.Optional[str] = None,
+ 718    ) -> t.List[t.Optional[exp.Expression]]:
+ 719        """
+ 720        Parses a list of tokens into a given Expression type. If a collection of Expression
+ 721        types is given instead, this method will try to parse the token list into each one
+ 722        of them, stopping at the first for which the parsing succeeds.
+ 723
+ 724        Args:
+ 725            expression_types: the expression type(s) to try and parse the token list into.
+ 726            raw_tokens: the list of tokens.
+ 727            sql: the original SQL string, used to produce helpful debug messages.
+ 728
+ 729        Returns:
+ 730            The target Expression.
+ 731        """
+ 732        errors = []
+ 733        for expression_type in ensure_collection(expression_types):
+ 734            parser = self.EXPRESSION_PARSERS.get(expression_type)
+ 735            if not parser:
+ 736                raise TypeError(f"No parser registered for {expression_type}")
+ 737            try:
+ 738                return self._parse(parser, raw_tokens, sql)
+ 739            except ParseError as e:
+ 740                e.errors[0]["into_expression"] = expression_type
+ 741                errors.append(e)
+ 742        raise ParseError(
+ 743            f"Failed to parse into {expression_types}",
+ 744            errors=merge_errors(errors),
+ 745        ) from errors[-1]
+ 746
    def _parse(
        self,
        parse_method: t.Callable[[Parser], t.Optional[exp.Expression]],
        raw_tokens: t.List[Token],
        sql: t.Optional[str] = None,
    ) -> t.List[t.Optional[exp.Expression]]:
        """
        Split the token stream on semicolons and apply `parse_method` to each
        resulting chunk, returning one (possibly None) expression per statement.
        """
        self.reset()
        self.sql = sql or ""
        total = len(raw_tokens)
        chunks: t.List[t.List[Token]] = [[]]

        # Build per-statement chunks; a trailing semicolon does not start an
        # empty final chunk.
        for i, token in enumerate(raw_tokens):
            if token.token_type == TokenType.SEMICOLON:
                if i < total - 1:
                    chunks.append([])
            else:
                chunks[-1].append(token)

        expressions = []

        for tokens in chunks:
            # Rewind the cursor and position _curr on the chunk's first token.
            self._index = -1
            self._tokens = tokens
            self._advance()

            expressions.append(parse_method(self))

            # Leftover tokens mean the statement was not fully consumed.
            if self._index < len(self._tokens):
                self.raise_error("Invalid expression / Unexpected token")

            self.check_errors()

        return expressions
+ 780
+ 781    def check_errors(self) -> None:
+ 782        """
+ 783        Logs or raises any found errors, depending on the chosen error level setting.
+ 784        """
+ 785        if self.error_level == ErrorLevel.WARN:
+ 786            for error in self.errors:
+ 787                logger.error(str(error))
+ 788        elif self.error_level == ErrorLevel.RAISE and self.errors:
+ 789            raise ParseError(
+ 790                concat_messages(self.errors, self.max_errors),
+ 791                errors=merge_errors(self.errors),
+ 792            )
+ 793
    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
        """
        Appends an error in the list of recorded errors or raises it, depending on the chosen
        error level setting.
        """
        # Fall back to the current or previous token (or an empty dummy) so a
        # position can always be reported.
        token = token or self._curr or self._prev or Token.string("")
        start = self._find_token(token)
        end = start + len(token.text)
        # Show up to error_message_context characters of SQL on each side of
        # the offending token.
        start_context = self.sql[max(start - self.error_message_context, 0) : start]
        highlight = self.sql[start:end]
        end_context = self.sql[end : end + self.error_message_context]

        error = ParseError.new(
            # \033[4m ... \033[0m underlines the highlighted span on ANSI terminals.
            f"{message}. Line {token.line}, Col: {token.col}.\n"
            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
            description=message,
            line=token.line,
            col=token.col,
            start_context=start_context,
            highlight=highlight,
            end_context=end_context,
        )

        if self.error_level == ErrorLevel.IMMEDIATE:
            raise error

        self.errors.append(error)
+ 821
+ 822    def expression(
+ 823        self, exp_class: t.Type[exp.Expression], comments: t.Optional[t.List[str]] = None, **kwargs
+ 824    ) -> exp.Expression:
+ 825        """
+ 826        Creates a new, validated Expression.
+ 827
+ 828        Args:
+ 829            exp_class: the expression class to instantiate.
+ 830            comments: an optional list of comments to attach to the expression.
+ 831            kwargs: the arguments to set for the expression along with their respective values.
+ 832
+ 833        Returns:
+ 834            The target expression.
+ 835        """
+ 836        instance = exp_class(**kwargs)
+ 837        if self._prev_comments:
+ 838            instance.comments = self._prev_comments
+ 839            self._prev_comments = None
+ 840        if comments:
+ 841            instance.comments = comments
+ 842        self.validate_expression(instance)
+ 843        return instance
+ 844
+ 845    def validate_expression(
+ 846        self, expression: exp.Expression, args: t.Optional[t.List] = None
+ 847    ) -> None:
+ 848        """
+ 849        Validates an already instantiated expression, making sure that all its mandatory arguments
+ 850        are set.
+ 851
+ 852        Args:
+ 853            expression: the expression to validate.
+ 854            args: an optional list of items that was used to instantiate the expression, if it's a Func.
+ 855        """
+ 856        if self.error_level == ErrorLevel.IGNORE:
+ 857            return
+ 858
+ 859        for error_message in expression.error_messages(args):
+ 860            self.raise_error(error_message)
+ 861
+ 862    def _find_sql(self, start: Token, end: Token) -> str:
+ 863        return self.sql[self._find_token(start) : self._find_token(end) + len(end.text)]
+ 864
    def _find_token(self, token: Token) -> int:
        """
        Return the character offset of `token` in self.sql by walking the text
        and counting line/column positions until the token's location is reached.
        """
        line = 1
        col = 1
        index = 0

        while line < token.line or col < token.col:
            # WHITE_SPACE maps line-break characters to TokenType.BREAK.
            if Tokenizer.WHITE_SPACE.get(self.sql[index]) == TokenType.BREAK:
                line += 1
                col = 1
            else:
                col += 1
            index += 1

        return index
+ 879
+ 880    def _advance(self, times: int = 1) -> None:
+ 881        self._index += times
+ 882        self._curr = seq_get(self._tokens, self._index)
+ 883        self._next = seq_get(self._tokens, self._index + 1)
+ 884        if self._index > 0:
+ 885            self._prev = self._tokens[self._index - 1]
+ 886            self._prev_comments = self._prev.comments
+ 887        else:
+ 888            self._prev = None
+ 889            self._prev_comments = None
+ 890
+ 891    def _retreat(self, index: int) -> None:
+ 892        self._advance(index - self._index)
+ 893
+ 894    def _parse_command(self) -> exp.Expression:
+ 895        return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string())
+ 896
    def _parse_statement(self) -> t.Optional[exp.Expression]:
        """
        Parse one complete statement from the current position.

        Dispatch order: STATEMENT_PARSERS by leading token, then tokenizer-level
        COMMANDS (parsed opaquely), and finally a plain expression / SELECT.
        """
        if self._curr is None:
            return None

        if self._match_set(self.STATEMENT_PARSERS):
            return self.STATEMENT_PARSERS[self._prev.token_type](self)

        if self._match_set(Tokenizer.COMMANDS):
            return self._parse_command()

        expression = self._parse_expression()
        expression = self._parse_set_operations(expression) if expression else self._parse_select()

        # Attach any trailing modifiers (WHERE, ORDER BY, LIMIT, ...) in place.
        self._parse_query_modifiers(expression)
        return expression
+ 912
    def _parse_drop(self, default_kind: t.Optional[str] = None) -> t.Optional[exp.Expression]:
        """
        Parse a DROP statement. If the dropped object kind is not one of
        CREATABLES and no `default_kind` is supplied, fall back to parsing the
        statement as an opaque command.
        """
        start = self._prev
        temporary = self._match(TokenType.TEMPORARY)
        materialized = self._match(TokenType.MATERIALIZED)
        kind = self._match_set(self.CREATABLES) and self._prev.text
        if not kind:
            if default_kind:
                kind = default_kind
            else:
                return self._parse_as_command(start)

        return self.expression(
            exp.Drop,
            exists=self._parse_exists(),
            this=self._parse_table(schema=True),
            kind=kind,
            temporary=temporary,
            materialized=materialized,
            cascade=self._match(TokenType.CASCADE),
        )
+ 933
    def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
        """
        Match IF [NOT] EXISTS. Truthy only when the full phrase (including NOT
        when `not_` is True) was consumed; short-circuits otherwise.
        """
        return (
            self._match(TokenType.IF)
            and (not not_ or self._match(TokenType.NOT))
            and self._match(TokenType.EXISTS)
        )
+ 940
    def _parse_create(self) -> t.Optional[exp.Expression]:
        """
        Parse a CREATE statement (table / view / schema / index / function /
        procedure) including many optional dialect-specific modifiers.
        Falls back to an opaque command when the created object kind is unknown.
        """
        start = self._prev
        replace = self._match_pair(TokenType.OR, TokenType.REPLACE)
        set_ = self._match(TokenType.SET)  # Teradata
        multiset = self._match_text_seq("MULTISET")  # Teradata
        global_temporary = self._match_text_seq("GLOBAL", "TEMPORARY")  # Teradata
        volatile = self._match(TokenType.VOLATILE)  # Teradata
        temporary = self._match(TokenType.TEMPORARY)
        transient = self._match_text_seq("TRANSIENT")
        external = self._match_text_seq("EXTERNAL")
        unique = self._match(TokenType.UNIQUE)
        materialized = self._match(TokenType.MATERIALIZED)

        # CREATE TABLE FUNCTION ...: consume TABLE so FUNCTION dispatches below.
        if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False):
            self._match(TokenType.TABLE)

        properties = None
        create_token = self._match_set(self.CREATABLES) and self._prev

        if not create_token:
            # Some dialects put properties before the object kind; if neither
            # properties nor a known kind follow, this CREATE form is unknown.
            properties = self._parse_properties()
            create_token = self._match_set(self.CREATABLES) and self._prev

            if not properties or not create_token:
                return self._parse_as_command(start)

        exists = self._parse_exists(not_=True)
        this = None
        expression = None
        data = None
        statistics = None
        no_primary_index = None
        indexes = None
        no_schema_binding = None
        begin = None

        if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
            this = self._parse_user_defined_function(kind=create_token.token_type)
            properties = self._parse_properties()

            self._match(TokenType.ALIAS)
            begin = self._match(TokenType.BEGIN)
            return_ = self._match_text_seq("RETURN")
            expression = self._parse_statement()

            # A bare RETURN body is wrapped so the tree records the form used.
            if return_:
                expression = self.expression(exp.Return, this=expression)
        elif create_token.token_type == TokenType.INDEX:
            this = self._parse_index()
        elif create_token.token_type in (
            TokenType.TABLE,
            TokenType.VIEW,
            TokenType.SCHEMA,
        ):
            table_parts = self._parse_table_parts(schema=True)

            if self._match(TokenType.COMMA):  # comma-separated properties before schema definition
                properties = self._parse_properties(before=True)

            this = self._parse_schema(this=table_parts)

            if not properties:  # properties after schema definition
                properties = self._parse_properties()

            self._match(TokenType.ALIAS)
            expression = self._parse_ddl_select()

            if create_token.token_type == TokenType.TABLE:
                # WITH [NO] DATA / AND [NO] STATISTICS / NO PRIMARY INDEX:
                # Teradata-style CTAS options.
                if self._match_text_seq("WITH", "DATA"):
                    data = True
                elif self._match_text_seq("WITH", "NO", "DATA"):
                    data = False

                if self._match_text_seq("AND", "STATISTICS"):
                    statistics = True
                elif self._match_text_seq("AND", "NO", "STATISTICS"):
                    statistics = False

                no_primary_index = self._match_text_seq("NO", "PRIMARY", "INDEX")

                indexes = []
                while True:
                    index = self._parse_create_table_index()

                    # post index PARTITION BY property
                    if self._match(TokenType.PARTITION_BY, advance=False):
                        if properties:
                            properties.expressions.append(self._parse_property())
                        else:
                            properties = self._parse_properties()

                    if not index:
                        break
                    else:
                        indexes.append(index)
            elif create_token.token_type == TokenType.VIEW:
                if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
                    no_schema_binding = True

        return self.expression(
            exp.Create,
            this=this,
            kind=create_token.text,
            expression=expression,
            set=set_,
            multiset=multiset,
            global_temporary=global_temporary,
            volatile=volatile,
            exists=exists,
            properties=properties,
            temporary=temporary,
            transient=transient,
            external=external,
            replace=replace,
            unique=unique,
            materialized=materialized,
            data=data,
            statistics=statistics,
            no_primary_index=no_primary_index,
            indexes=indexes,
            no_schema_binding=no_schema_binding,
            begin=begin,
        )
+1064
+1065    def _parse_property_before(self) -> t.Optional[exp.Expression]:
+1066        self._match(TokenType.COMMA)
+1067
+1068        # parsers look to _prev for no/dual/default, so need to consume first
+1069        self._match_text_seq("NO")
+1070        self._match_text_seq("DUAL")
+1071        self._match_text_seq("DEFAULT")
+1072
+1073        if self.PROPERTY_PARSERS.get(self._curr.text.upper()):
+1074            return self.PROPERTY_PARSERS[self._curr.text.upper()](self)
+1075
+1076        return None
+1077
    def _parse_property(self) -> t.Optional[exp.Expression]:
        """Parse a single property in a post-schema property list, or None."""
        if self._match_texts(self.PROPERTY_PARSERS):
            return self.PROPERTY_PARSERS[self._prev.text.upper()](self)

        if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET):
            return self._parse_character_set(True)

        if self._match_pair(TokenType.COMPOUND, TokenType.SORTKEY):
            return self._parse_sortkey(compound=True)

        if self._match_text_seq("SQL", "SECURITY"):
            return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER"))

        # Generic `key = value` assignment where key is an identifier or string;
        # peek (advance=False) so nothing is consumed unless the pattern matches.
        assignment = self._match_pair(
            TokenType.VAR, TokenType.EQ, advance=False
        ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False)

        if assignment:
            key = self._parse_var_or_string()
            self._match(TokenType.EQ)
            return self.expression(exp.Property, this=key, value=self._parse_column())

        return None
+1101
+1102    def _parse_property_assignment(self, exp_class: t.Type[exp.Expression]) -> exp.Expression:
+1103        self._match(TokenType.EQ)
+1104        self._match(TokenType.ALIAS)
+1105        return self.expression(
+1106            exp_class,
+1107            this=self._parse_var_or_string() or self._parse_number() or self._parse_id_var(),
+1108        )
+1109
+1110    def _parse_properties(self, before=None) -> t.Optional[exp.Expression]:
+1111        properties = []
+1112
+1113        while True:
+1114            if before:
+1115                identified_property = self._parse_property_before()
+1116            else:
+1117                identified_property = self._parse_property()
+1118
+1119            if not identified_property:
+1120                break
+1121            for p in ensure_collection(identified_property):
+1122                properties.append(p)
+1123
+1124        if properties:
+1125            return self.expression(exp.Properties, expressions=properties)
+1126
+1127        return None
+1128
+1129    def _parse_fallback(self, no=False) -> exp.Expression:
+1130        self._match_text_seq("FALLBACK")
+1131        return self.expression(
+1132            exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION")
+1133        )
+1134
+1135    def _parse_with_property(
+1136        self,
+1137    ) -> t.Union[t.Optional[exp.Expression], t.List[t.Optional[exp.Expression]]]:
+1138        if self._match(TokenType.L_PAREN, advance=False):
+1139            return self._parse_wrapped_csv(self._parse_property)
+1140
+1141        if not self._next:
+1142            return None
+1143
+1144        if self._next.text.upper() == "JOURNAL":
+1145            return self._parse_withjournaltable()
+1146
+1147        return self._parse_withisolatedloading()
+1148
    # https://dev.mysql.com/doc/refman/8.0/en/create-view.html
    def _parse_definer(self) -> t.Optional[exp.Expression]:
        """Parse DEFINER = user@host (MySQL); returns None if either part is missing."""
        self._match(TokenType.EQ)

        user = self._parse_id_var()
        self._match(TokenType.PARAMETER)  # presumably consumes the '@' separator — verify tokenizer
        host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text)

        if not user or not host:
            return None

        return exp.DefinerProperty(this=f"{user}@{host}")
+1161
+1162    def _parse_withjournaltable(self) -> exp.Expression:
+1163        self._match_text_seq("WITH", "JOURNAL", "TABLE")
+1164        self._match(TokenType.EQ)
+1165        return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts())
+1166
    def _parse_log(self, no=False) -> exp.Expression:
        """Parse a [NO] LOG property; the optional NO was consumed by the dispatcher."""
        self._match_text_seq("LOG")
        return self.expression(exp.LogProperty, no=no)
+1170
+1171    def _parse_journal(self, no=False, dual=False) -> exp.Expression:
+1172        before = self._match_text_seq("BEFORE")
+1173        self._match_text_seq("JOURNAL")
+1174        return self.expression(exp.JournalProperty, no=no, dual=dual, before=before)
+1175
    def _parse_afterjournal(self, no=False, dual=False, local=None) -> exp.Expression:
        """
        Parse an AFTER JOURNAL property; the no/dual/local flags were determined
        by the dispatching keyword, and any leftover NOT/LOCAL keywords are
        consumed here before the AFTER JOURNAL phrase itself.
        """
        self._match_text_seq("NOT")
        self._match_text_seq("LOCAL")
        self._match_text_seq("AFTER", "JOURNAL")
        return self.expression(exp.AfterJournalProperty, no=no, dual=dual, local=local)
+1181
+1182    def _parse_checksum(self) -> exp.Expression:
+1183        self._match_text_seq("CHECKSUM")
+1184        self._match(TokenType.EQ)
+1185
+1186        on = None
+1187        if self._match(TokenType.ON):
+1188            on = True
+1189        elif self._match_text_seq("OFF"):
+1190            on = False
+1191        default = self._match(TokenType.DEFAULT)
+1192
+1193        return self.expression(
+1194            exp.ChecksumProperty,
+1195            on=on,
+1196            default=default,
+1197        )
+1198
+1199    def _parse_freespace(self) -> exp.Expression:
+1200        self._match_text_seq("FREESPACE")
+1201        self._match(TokenType.EQ)
+1202        return self.expression(
+1203            exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT)
+1204        )
+1205
+1206    def _parse_mergeblockratio(self, no=False, default=False) -> exp.Expression:
+1207        self._match_text_seq("MERGEBLOCKRATIO")
+1208        if self._match(TokenType.EQ):
+1209            return self.expression(
+1210                exp.MergeBlockRatioProperty,
+1211                this=self._parse_number(),
+1212                percent=self._match(TokenType.PERCENT),
+1213            )
+1214        else:
+1215            return self.expression(
+1216                exp.MergeBlockRatioProperty,
+1217                no=no,
+1218                default=default,
+1219            )
+1220
+1221    def _parse_datablocksize(self, default=None) -> exp.Expression:
+1222        if default:
+1223            self._match_text_seq("DATABLOCKSIZE")
+1224            return self.expression(exp.DataBlocksizeProperty, default=True)
+1225        elif self._match_texts(("MIN", "MINIMUM")):
+1226            self._match_text_seq("DATABLOCKSIZE")
+1227            return self.expression(exp.DataBlocksizeProperty, min=True)
+1228        elif self._match_texts(("MAX", "MAXIMUM")):
+1229            self._match_text_seq("DATABLOCKSIZE")
+1230            return self.expression(exp.DataBlocksizeProperty, min=False)
+1231
+1232        self._match_text_seq("DATABLOCKSIZE")
+1233        self._match(TokenType.EQ)
+1234        size = self._parse_number()
+1235        units = None
+1236        if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")):
+1237            units = self._prev.text
+1238        return self.expression(exp.DataBlocksizeProperty, size=size, units=units)
+1239
+1240    def _parse_blockcompression(self) -> exp.Expression:
+1241        self._match_text_seq("BLOCKCOMPRESSION")
+1242        self._match(TokenType.EQ)
+1243        always = self._match(TokenType.ALWAYS)
+1244        manual = self._match_text_seq("MANUAL")
+1245        never = self._match_text_seq("NEVER")
+1246        default = self._match_text_seq("DEFAULT")
+1247        autotemp = None
+1248        if self._match_text_seq("AUTOTEMP"):
+1249            autotemp = self._parse_schema()
+1250
+1251        return self.expression(
+1252            exp.BlockCompressionProperty,
+1253            always=always,
+1254            manual=manual,
+1255            never=never,
+1256            default=default,
+1257            autotemp=autotemp,
+1258        )
+1259
+1260    def _parse_withisolatedloading(self) -> exp.Expression:
+1261        self._match(TokenType.WITH)
+1262        no = self._match_text_seq("NO")
+1263        concurrent = self._match_text_seq("CONCURRENT")
+1264        self._match_text_seq("ISOLATED", "LOADING")
+1265        for_all = self._match_text_seq("FOR", "ALL")
+1266        for_insert = self._match_text_seq("FOR", "INSERT")
+1267        for_none = self._match_text_seq("FOR", "NONE")
+1268        return self.expression(
+1269            exp.IsolatedLoadingProperty,
+1270            no=no,
+1271            concurrent=concurrent,
+1272            for_all=for_all,
+1273            for_insert=for_insert,
+1274            for_none=for_none,
+1275        )
+1276
+1277    def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]:
+1278        if self._match(TokenType.PARTITION_BY):
+1279            return self._parse_csv(self._parse_conjunction)
+1280        return []
+1281
+1282    def _parse_partitioned_by(self) -> exp.Expression:
+1283        self._match(TokenType.EQ)
+1284        return self.expression(
+1285            exp.PartitionedByProperty,
+1286            this=self._parse_schema() or self._parse_bracket(self._parse_field()),
+1287        )
+1288
+1289    def _parse_distkey(self) -> exp.Expression:
+1290        return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
+1291
+1292    def _parse_create_like(self) -> t.Optional[exp.Expression]:
+1293        table = self._parse_table(schema=True)
+1294        options = []
+1295        while self._match_texts(("INCLUDING", "EXCLUDING")):
+1296            this = self._prev.text.upper()
+1297            id_var = self._parse_id_var()
+1298
+1299            if not id_var:
+1300                return None
+1301
+1302            options.append(
+1303                self.expression(
+1304                    exp.Property,
+1305                    this=this,
+1306                    value=exp.Var(this=id_var.this.upper()),
+1307                )
+1308            )
+1309        return self.expression(exp.LikeProperty, this=table, expressions=options)
+1310
+1311    def _parse_sortkey(self, compound: bool = False) -> exp.Expression:
+1312        return self.expression(
+1313            exp.SortKeyProperty, this=self._parse_wrapped_csv(self._parse_id_var), compound=compound
+1314        )
+1315
+1316    def _parse_character_set(self, default: bool = False) -> exp.Expression:
+1317        self._match(TokenType.EQ)
+1318        return self.expression(
+1319            exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default
+1320        )
+1321
+1322    def _parse_returns(self) -> exp.Expression:
+1323        value: t.Optional[exp.Expression]
+1324        is_table = self._match(TokenType.TABLE)
+1325
+1326        if is_table:
+1327            if self._match(TokenType.LT):
+1328                value = self.expression(
+1329                    exp.Schema,
+1330                    this="TABLE",
+1331                    expressions=self._parse_csv(self._parse_struct_kwargs),
+1332                )
+1333                if not self._match(TokenType.GT):
+1334                    self.raise_error("Expecting >")
+1335            else:
+1336                value = self._parse_schema(exp.Var(this="TABLE"))
+1337        else:
+1338            value = self._parse_types()
+1339
+1340        return self.expression(exp.ReturnsProperty, this=value, is_table=is_table)
+1341
+1342    def _parse_describe(self) -> exp.Expression:
+1343        kind = self._match_set(self.CREATABLES) and self._prev.text
+1344        this = self._parse_table()
+1345
+1346        return self.expression(exp.Describe, this=this, kind=kind)
+1347
    def _parse_insert(self) -> exp.Expression:
        """Parse an INSERT statement, including the INSERT ... DIRECTORY form."""
        overwrite = self._match(TokenType.OVERWRITE)
        local = self._match(TokenType.LOCAL)

        this: t.Optional[exp.Expression]

        if self._match_text_seq("DIRECTORY"):
            # INSERT [OVERWRITE] [LOCAL] DIRECTORY '<path>' [ROW FORMAT ...]
            this = self.expression(
                exp.Directory,
                this=self._parse_var_or_string(),
                local=local,
                row_format=self._parse_row_format(match_row=True),
            )
        else:
            # Standard form: both INTO and TABLE are optional noise words here.
            self._match(TokenType.INTO)
            self._match(TokenType.TABLE)
            this = self._parse_table(schema=True)

        # NOTE: keyword arguments are evaluated left to right, so the EXISTS,
        # PARTITION and SELECT parts are consumed from the token stream in
        # exactly this order.
        return self.expression(
            exp.Insert,
            this=this,
            exists=self._parse_exists(),
            partition=self._parse_partition(),
            expression=self._parse_ddl_select(),
            overwrite=overwrite,
        )
+1374
+1375    def _parse_row(self) -> t.Optional[exp.Expression]:
+1376        if not self._match(TokenType.FORMAT):
+1377            return None
+1378        return self._parse_row_format()
+1379
    def _parse_row_format(self, match_row: bool = False) -> t.Optional[exp.Expression]:
        """Parse a ROW FORMAT clause (SERDE or DELIMITED).

        Args:
            match_row: when True, the leading ROW FORMAT token pair must be
                present, otherwise None is returned without consuming anything.
        """
        if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT):
            return None

        if self._match_text_seq("SERDE"):
            return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string())

        self._match_text_seq("DELIMITED")

        kwargs = {}

        # Each sub-clause is optional; ESCAPED BY is only parsed directly
        # after FIELDS TERMINATED BY.
        if self._match_text_seq("FIELDS", "TERMINATED", "BY"):
            kwargs["fields"] = self._parse_string()
            if self._match_text_seq("ESCAPED", "BY"):
                kwargs["escaped"] = self._parse_string()
        if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"):
            kwargs["collection_items"] = self._parse_string()
        if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"):
            kwargs["map_keys"] = self._parse_string()
        if self._match_text_seq("LINES", "TERMINATED", "BY"):
            kwargs["lines"] = self._parse_string()
        if self._match_text_seq("NULL", "DEFINED", "AS"):
            kwargs["null"] = self._parse_string()

        return self.expression(exp.RowFormatDelimitedProperty, **kwargs)  # type: ignore
+1405
    def _parse_load_data(self) -> exp.Expression:
        """Parse LOAD DATA [LOCAL] INPATH '<path>' [OVERWRITE] INTO TABLE ...."""
        local = self._match(TokenType.LOCAL)
        self._match_text_seq("INPATH")
        inpath = self._parse_string()
        overwrite = self._match(TokenType.OVERWRITE)
        self._match_pair(TokenType.INTO, TokenType.TABLE)

        # Keyword arguments are evaluated in order, so the table, PARTITION,
        # INPUTFORMAT and SERDE parts are consumed from the stream sequentially.
        return self.expression(
            exp.LoadData,
            this=self._parse_table(schema=True),
            local=local,
            overwrite=overwrite,
            inpath=inpath,
            partition=self._parse_partition(),
            input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(),
            serde=self._match_text_seq("SERDE") and self._parse_string(),
        )
+1423
+1424    def _parse_delete(self) -> exp.Expression:
+1425        self._match(TokenType.FROM)
+1426
+1427        return self.expression(
+1428            exp.Delete,
+1429            this=self._parse_table(schema=True),
+1430            using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()),
+1431            where=self._parse_where(),
+1432        )
+1433
+1434    def _parse_update(self) -> exp.Expression:
+1435        return self.expression(
+1436            exp.Update,
+1437            **{  # type: ignore
+1438                "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
+1439                "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality),
+1440                "from": self._parse_from(),
+1441                "where": self._parse_where(),
+1442            },
+1443        )
+1444
+1445    def _parse_uncache(self) -> exp.Expression:
+1446        if not self._match(TokenType.TABLE):
+1447            self.raise_error("Expecting TABLE after UNCACHE")
+1448
+1449        return self.expression(
+1450            exp.Uncache,
+1451            exists=self._parse_exists(),
+1452            this=self._parse_table(schema=True),
+1453        )
+1454
+1455    def _parse_cache(self) -> exp.Expression:
+1456        lazy = self._match(TokenType.LAZY)
+1457        self._match(TokenType.TABLE)
+1458        table = self._parse_table(schema=True)
+1459        options = []
+1460
+1461        if self._match(TokenType.OPTIONS):
+1462            self._match_l_paren()
+1463            k = self._parse_string()
+1464            self._match(TokenType.EQ)
+1465            v = self._parse_string()
+1466            options = [k, v]
+1467            self._match_r_paren()
+1468
+1469        self._match(TokenType.ALIAS)
+1470        return self.expression(
+1471            exp.Cache,
+1472            this=table,
+1473            lazy=lazy,
+1474            options=options,
+1475            expression=self._parse_select(nested=True),
+1476        )
+1477
+1478    def _parse_partition(self) -> t.Optional[exp.Expression]:
+1479        if not self._match(TokenType.PARTITION):
+1480            return None
+1481
+1482        return self.expression(
+1483            exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction)
+1484        )
+1485
+1486    def _parse_value(self) -> exp.Expression:
+1487        if self._match(TokenType.L_PAREN):
+1488            expressions = self._parse_csv(self._parse_conjunction)
+1489            self._match_r_paren()
+1490            return self.expression(exp.Tuple, expressions=expressions)
+1491
+1492        # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows.
+1493        # Source: https://prestodb.io/docs/current/sql/values.html
+1494        return self.expression(exp.Tuple, expressions=[self._parse_conjunction()])
+1495
    def _parse_select(
        self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True
    ) -> t.Optional[exp.Expression]:
        """Parse a SELECT-like query: a CTE-prefixed statement, a SELECT, a
        parenthesized (sub)query or table, or a VALUES clause.

        Args:
            nested: allow a parenthesized nested select.
            table: allow a parenthesized table reference instead of a select.
            parse_subquery_alias: whether to parse an alias after a subquery.
        """
        cte = self._parse_with()
        if cte:
            this = self._parse_statement()

            if not this:
                self.raise_error("Failed to parse any statement following CTE")
                return cte

            if "with" in this.arg_types:
                this.set("with", cte)
            else:
                self.raise_error(f"{this.key} does not support CTE")
                this = cte
        elif self._match(TokenType.SELECT):
            comments = self._prev_comments

            hint = self._parse_hint()
            all_ = self._match(TokenType.ALL)
            distinct = self._match(TokenType.DISTINCT)

            if distinct:
                # DISTINCT [ON (...)]
                distinct = self.expression(
                    exp.Distinct,
                    on=self._parse_value() if self._match(TokenType.ON) else None,
                )

            if all_ and distinct:
                self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")

            # TOP-style limits appear before the projection list.
            limit = self._parse_limit(top=True)
            expressions = self._parse_csv(self._parse_expression)

            this = self.expression(
                exp.Select,
                hint=hint,
                distinct=distinct,
                expressions=expressions,
                limit=limit,
            )
            this.comments = comments

            into = self._parse_into()
            if into:
                this.set("into", into)

            from_ = self._parse_from()
            if from_:
                this.set("from", from_)

            self._parse_query_modifiers(this)
        elif (table or nested) and self._match(TokenType.L_PAREN):
            this = self._parse_table() if table else self._parse_select(nested=True)
            self._parse_query_modifiers(this)
            this = self._parse_set_operations(this)
            self._match_r_paren()

            # early return so that subquery unions aren't parsed again
            # SELECT * FROM (SELECT 1) UNION ALL SELECT 1
            # Union ALL should be a property of the top select node, not the subquery
            return self._parse_subquery(this, parse_alias=parse_subquery_alias)
        elif self._match(TokenType.VALUES):
            this = self.expression(
                exp.Values,
                expressions=self._parse_csv(self._parse_value),
                alias=self._parse_table_alias(),
            )
        else:
            this = None

        return self._parse_set_operations(this)
+1569
    def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse a WITH clause (list of CTEs) into a With node.

        Args:
            skip_with_token: when True the WITH keyword is assumed to have been
                consumed already.
        """
        if not skip_with_token and not self._match(TokenType.WITH):
            return None

        recursive = self._match(TokenType.RECURSIVE)

        expressions = []
        while True:
            expressions.append(self._parse_cte())

            # CTEs are normally comma separated; a repeated WITH between them
            # is tolerated. Note the short-circuit: when a comma matches, the
            # WITH test in the condition is skipped and the else branch
            # consumes a following WITH instead.
            if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH):
                break
            else:
                self._match(TokenType.WITH)

        return self.expression(exp.With, expressions=expressions, recursive=recursive)
+1586
+1587    def _parse_cte(self) -> exp.Expression:
+1588        alias = self._parse_table_alias()
+1589        if not alias or not alias.this:
+1590            self.raise_error("Expected CTE to have alias")
+1591
+1592        self._match(TokenType.ALIAS)
+1593
+1594        return self.expression(
+1595            exp.CTE,
+1596            this=self._parse_wrapped(self._parse_statement),
+1597            alias=alias,
+1598        )
+1599
+1600    def _parse_table_alias(
+1601        self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
+1602    ) -> t.Optional[exp.Expression]:
+1603        any_token = self._match(TokenType.ALIAS)
+1604        alias = self._parse_id_var(
+1605            any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS
+1606        )
+1607        index = self._index
+1608
+1609        if self._match(TokenType.L_PAREN):
+1610            columns = self._parse_csv(lambda: self._parse_column_def(self._parse_id_var()))
+1611            self._match_r_paren() if columns else self._retreat(index)
+1612        else:
+1613            columns = None
+1614
+1615        if not alias and not columns:
+1616            return None
+1617
+1618        return self.expression(exp.TableAlias, this=alias, columns=columns)
+1619
+1620    def _parse_subquery(
+1621        self, this: t.Optional[exp.Expression], parse_alias: bool = True
+1622    ) -> exp.Expression:
+1623        return self.expression(
+1624            exp.Subquery,
+1625            this=this,
+1626            pivots=self._parse_pivots(),
+1627            alias=self._parse_table_alias() if parse_alias else None,
+1628        )
+1629
    def _parse_query_modifiers(self, this: t.Optional[exp.Expression]) -> None:
        """Attach laterals, joins and the trailing modifier clauses (as defined
        by QUERY_MODIFIER_PARSERS) to *this* in place.

        No-op for nodes that are not in MODIFIABLES.
        """
        if not isinstance(this, self.MODIFIABLES):
            return

        table = isinstance(this, exp.Table)

        while True:
            lateral = self._parse_lateral()
            join = self._parse_join()
            # Comma-joins are not consumed directly after a bare table reference.
            comma = None if table else self._match(TokenType.COMMA)
            if lateral:
                this.append("laterals", lateral)
            if join:
                this.append("joins", join)
            if comma:
                # "FROM a, b" -- append the next table to the FROM clause.
                this.args["from"].append("expressions", self._parse_table())
            if not (lateral or join or comma):
                break

        for key, parser in self.QUERY_MODIFIER_PARSERS.items():
            expression = parser(self)

            if expression:
                this.set(key, expression)
+1654
+1655    def _parse_hint(self) -> t.Optional[exp.Expression]:
+1656        if self._match(TokenType.HINT):
+1657            hints = self._parse_csv(self._parse_function)
+1658            if not self._match_pair(TokenType.STAR, TokenType.SLASH):
+1659                self.raise_error("Expected */ after HINT")
+1660            return self.expression(exp.Hint, expressions=hints)
+1661
+1662        return None
+1663
+1664    def _parse_into(self) -> t.Optional[exp.Expression]:
+1665        if not self._match(TokenType.INTO):
+1666            return None
+1667
+1668        temp = self._match(TokenType.TEMPORARY)
+1669        unlogged = self._match(TokenType.UNLOGGED)
+1670        self._match(TokenType.TABLE)
+1671
+1672        return self.expression(
+1673            exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged
+1674        )
+1675
+1676    def _parse_from(self) -> t.Optional[exp.Expression]:
+1677        if not self._match(TokenType.FROM):
+1678            return None
+1679
+1680        return self.expression(
+1681            exp.From, comments=self._prev_comments, expressions=self._parse_csv(self._parse_table)
+1682        )
+1683
+1684    def _parse_match_recognize(self) -> t.Optional[exp.Expression]:
+1685        if not self._match(TokenType.MATCH_RECOGNIZE):
+1686            return None
+1687        self._match_l_paren()
+1688
+1689        partition = self._parse_partition_by()
+1690        order = self._parse_order()
+1691        measures = (
+1692            self._parse_alias(self._parse_conjunction())
+1693            if self._match_text_seq("MEASURES")
+1694            else None
+1695        )
+1696
+1697        if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
+1698            rows = exp.Var(this="ONE ROW PER MATCH")
+1699        elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"):
+1700            text = "ALL ROWS PER MATCH"
+1701            if self._match_text_seq("SHOW", "EMPTY", "MATCHES"):
+1702                text += f" SHOW EMPTY MATCHES"
+1703            elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"):
+1704                text += f" OMIT EMPTY MATCHES"
+1705            elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"):
+1706                text += f" WITH UNMATCHED ROWS"
+1707            rows = exp.Var(this=text)
+1708        else:
+1709            rows = None
+1710
+1711        if self._match_text_seq("AFTER", "MATCH", "SKIP"):
+1712            text = "AFTER MATCH SKIP"
+1713            if self._match_text_seq("PAST", "LAST", "ROW"):
+1714                text += f" PAST LAST ROW"
+1715            elif self._match_text_seq("TO", "NEXT", "ROW"):
+1716                text += f" TO NEXT ROW"
+1717            elif self._match_text_seq("TO", "FIRST"):
+1718                text += f" TO FIRST {self._advance_any().text}"  # type: ignore
+1719            elif self._match_text_seq("TO", "LAST"):
+1720                text += f" TO LAST {self._advance_any().text}"  # type: ignore
+1721            after = exp.Var(this=text)
+1722        else:
+1723            after = None
+1724
+1725        if self._match_text_seq("PATTERN"):
+1726            self._match_l_paren()
+1727
+1728            if not self._curr:
+1729                self.raise_error("Expecting )", self._curr)
+1730
+1731            paren = 1
+1732            start = self._curr
+1733
+1734            while self._curr and paren > 0:
+1735                if self._curr.token_type == TokenType.L_PAREN:
+1736                    paren += 1
+1737                if self._curr.token_type == TokenType.R_PAREN:
+1738                    paren -= 1
+1739                end = self._prev
+1740                self._advance()
+1741            if paren > 0:
+1742                self.raise_error("Expecting )", self._curr)
+1743            pattern = exp.Var(this=self._find_sql(start, end))
+1744        else:
+1745            pattern = None
+1746
+1747        define = (
+1748            self._parse_alias(self._parse_conjunction()) if self._match_text_seq("DEFINE") else None
+1749        )
+1750        self._match_r_paren()
+1751
+1752        return self.expression(
+1753            exp.MatchRecognize,
+1754            partition_by=partition,
+1755            order=order,
+1756            measures=measures,
+1757            rows=rows,
+1758            after=after,
+1759            pattern=pattern,
+1760            define=define,
+1761        )
+1762
    def _parse_lateral(self) -> t.Optional[exp.Expression]:
        """Parse LATERAL, CROSS APPLY or OUTER APPLY into a Lateral node.

        APPLY variants are wrapped in a Join (OUTER APPLY becomes a LEFT join).
        Returns None when none of these constructs is present.
        """
        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY)
        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)

        if outer_apply or cross_apply:
            this = self._parse_select(table=True)
            view = None
            outer = not cross_apply
        elif self._match(TokenType.LATERAL):
            this = self._parse_select(table=True)
            view = self._match(TokenType.VIEW)
            outer = self._match(TokenType.OUTER)
        else:
            return None

        if not this:
            # Not a subquery: parse a (possibly dotted) function call or name,
            # e.g. LATERAL explode(...) or LATERAL schema.func(...).
            this = self._parse_function() or self._parse_id_var(any_token=False)
            while self._match(TokenType.DOT):
                this = exp.Dot(
                    this=this,
                    expression=self._parse_function() or self._parse_id_var(any_token=False),
                )

        table_alias: t.Optional[exp.Expression]

        if view:
            # LATERAL VIEW func(...) tbl [AS col1, col2, ...]
            table = self._parse_id_var(any_token=False)
            columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else []
            table_alias = self.expression(exp.TableAlias, this=table, columns=columns)
        else:
            table_alias = self._parse_table_alias()

        expression = self.expression(
            exp.Lateral,
            this=this,
            view=view,
            outer=outer,
            alias=table_alias,
        )

        if outer_apply or cross_apply:
            return self.expression(exp.Join, this=expression, side=None if cross_apply else "LEFT")

        return expression
+1807
+1808    def _parse_join_side_and_kind(
+1809        self,
+1810    ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
+1811        return (
+1812            self._match(TokenType.NATURAL) and self._prev,
+1813            self._match_set(self.JOIN_SIDES) and self._prev,
+1814            self._match_set(self.JOIN_KINDS) and self._prev,
+1815        )
+1816
    def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse a JOIN clause, including NATURAL/side/kind qualifiers and the
        ON or USING condition.

        Args:
            skip_join_token: when True the JOIN keyword is assumed to have been
                consumed already.
        """
        natural, side, kind = self._parse_join_side_and_kind()

        if not skip_join_token and not self._match(TokenType.JOIN):
            return None

        kwargs: t.Dict[
            str, t.Optional[exp.Expression] | bool | str | t.List[t.Optional[exp.Expression]]
        ] = {"this": self._parse_table()}

        if natural:
            kwargs["natural"] = True
        if side:
            kwargs["side"] = side.text
        if kind:
            kwargs["kind"] = kind.text

        # ON and USING are mutually exclusive join conditions.
        if self._match(TokenType.ON):
            kwargs["on"] = self._parse_conjunction()
        elif self._match(TokenType.USING):
            kwargs["using"] = self._parse_wrapped_id_vars()

        return self.expression(exp.Join, **kwargs)  # type: ignore
+1840
+1841    def _parse_index(self) -> exp.Expression:
+1842        index = self._parse_id_var()
+1843        self._match(TokenType.ON)
+1844        self._match(TokenType.TABLE)  # hive
+1845
+1846        return self.expression(
+1847            exp.Index,
+1848            this=index,
+1849            table=self.expression(exp.Table, this=self._parse_id_var()),
+1850            columns=self._parse_expression(),
+1851        )
+1852
+1853    def _parse_create_table_index(self) -> t.Optional[exp.Expression]:
+1854        unique = self._match(TokenType.UNIQUE)
+1855        primary = self._match_text_seq("PRIMARY")
+1856        amp = self._match_text_seq("AMP")
+1857        if not self._match(TokenType.INDEX):
+1858            return None
+1859        index = self._parse_id_var()
+1860        columns = None
+1861        if self._match(TokenType.L_PAREN, advance=False):
+1862            columns = self._parse_wrapped_csv(self._parse_column)
+1863        return self.expression(
+1864            exp.Index,
+1865            this=index,
+1866            columns=columns,
+1867            unique=unique,
+1868            primary=primary,
+1869            amp=amp,
+1870        )
+1871
    def _parse_table_parts(self, schema: bool = False) -> exp.Expression:
        """Parse a dotted table reference into a Table node with optional db
        and catalog qualifiers (e.g. catalog.db.table).

        Args:
            schema: when True, do not try to parse the first part as a function.
        """
        catalog = None
        db = None
        table = (not schema and self._parse_function()) or self._parse_id_var(any_token=False)

        while self._match(TokenType.DOT):
            if catalog:
                # This allows nesting the table in arbitrarily many dot expressions if needed
                table = self.expression(exp.Dot, this=table, expression=self._parse_id_var())
            else:
                # Shift parts left: what was the table becomes the db, etc.
                catalog = db
                db = table
                table = self._parse_id_var()

        if not table:
            self.raise_error(f"Expected table name but got {self._curr}")

        return self.expression(
            exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots()
        )
+1892
    def _parse_table(
        self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    ) -> t.Optional[exp.Expression]:
        """Parse any table-like factor: a lateral, UNNEST, derived VALUES,
        subquery, or a plain (possibly aliased/sampled/hinted) table reference.

        Args:
            schema: parse the reference as a schema target.
            alias_tokens: tokens permitted as an alias; defaults to
                TABLE_ALIAS_TOKENS.
        """
        lateral = self._parse_lateral()

        if lateral:
            return lateral

        unnest = self._parse_unnest()

        if unnest:
            return unnest

        values = self._parse_derived_table_values()

        if values:
            return values

        subquery = self._parse_select(table=True)

        if subquery:
            return subquery

        this = self._parse_table_parts(schema=schema)

        if schema:
            return self._parse_schema(this=this)

        # Dialects differ on whether TABLESAMPLE appears before or after the alias.
        if self.alias_post_tablesample:
            table_sample = self._parse_table_sample()

        alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)

        if alias:
            this.set("alias", alias)

        # Table hints wrapped in WITH (...), parsed as functions or bare vars.
        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
            this.set(
                "hints",
                self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)),
            )
            self._match_r_paren()

        if not self.alias_post_tablesample:
            table_sample = self._parse_table_sample()

        if table_sample:
            # The sample node wraps the table it samples from.
            table_sample.set("this", this)
            this = table_sample

        return this
+1944
    def _parse_unnest(self) -> t.Optional[exp.Expression]:
        """Parse an UNNEST(...) table factor with optional WITH ORDINALITY,
        alias and WITH OFFSET parts. Returns None if UNNEST is not next.
        """
        if not self._match(TokenType.UNNEST):
            return None

        expressions = self._parse_wrapped_csv(self._parse_column)
        ordinality = bool(self._match(TokenType.WITH) and self._match(TokenType.ORDINALITY))
        alias = self._parse_table_alias()

        if alias and self.unnest_column_only:
            # In unnest_column_only dialects the alias names the produced
            # column rather than the table: move it into "columns".
            if alias.args.get("columns"):
                self.raise_error("Unexpected extra column alias in unnest.")
            alias.set("columns", [alias.this])
            alias.set("this", None)

        offset = None
        if self._match_pair(TokenType.WITH, TokenType.OFFSET):
            self._match(TokenType.ALIAS)
            offset = self._parse_conjunction()

        return self.expression(
            exp.Unnest,
            expressions=expressions,
            ordinality=ordinality,
            alias=alias,
            offset=offset,
        )
+1971
+1972    def _parse_derived_table_values(self) -> t.Optional[exp.Expression]:
+1973        is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES)
+1974        if not is_derived and not self._match(TokenType.VALUES):
+1975            return None
+1976
+1977        expressions = self._parse_csv(self._parse_value)
+1978
+1979        if is_derived:
+1980            self._match_r_paren()
+1981
+1982        return self.expression(exp.Values, expressions=expressions, alias=self._parse_table_alias())
+1983
+1984    def _parse_table_sample(self) -> t.Optional[exp.Expression]:
+1985        if not self._match(TokenType.TABLE_SAMPLE):
+1986            return None
+1987
+1988        method = self._parse_var()
+1989        bucket_numerator = None
+1990        bucket_denominator = None
+1991        bucket_field = None
+1992        percent = None
+1993        rows = None
+1994        size = None
+1995        seed = None
+1996
+1997        self._match_l_paren()
+1998
+1999        if self._match(TokenType.BUCKET):
+2000            bucket_numerator = self._parse_number()
+2001            self._match(TokenType.OUT_OF)
+2002            bucket_denominator = bucket_denominator = self._parse_number()
+2003            self._match(TokenType.ON)
+2004            bucket_field = self._parse_field()
+2005        else:
+2006            num = self._parse_number()
+2007
+2008            if self._match(TokenType.PERCENT):
+2009                percent = num
+2010            elif self._match(TokenType.ROWS):
+2011                rows = num
+2012            else:
+2013                size = num
+2014
+2015        self._match_r_paren()
+2016
+2017        if self._match(TokenType.SEED):
+2018            seed = self._parse_wrapped(self._parse_number)
+2019
+2020        return self.expression(
+2021            exp.TableSample,
+2022            method=method,
+2023            bucket_numerator=bucket_numerator,
+2024            bucket_denominator=bucket_denominator,
+2025            bucket_field=bucket_field,
+2026            percent=percent,
+2027            rows=rows,
+2028            size=size,
+2029            seed=seed,
+2030        )
+2031
+2032    def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]:
+2033        return list(iter(self._parse_pivot, None))
+2034
+2035    def _parse_pivot(self) -> t.Optional[exp.Expression]:
+2036        index = self._index
+2037
+2038        if self._match(TokenType.PIVOT):
+2039            unpivot = False
+2040        elif self._match(TokenType.UNPIVOT):
+2041            unpivot = True
+2042        else:
+2043            return None
+2044
+2045        expressions = []
+2046        field = None
+2047
+2048        if not self._match(TokenType.L_PAREN):
+2049            self._retreat(index)
+2050            return None
+2051
+2052        if unpivot:
+2053            expressions = self._parse_csv(self._parse_column)
+2054        else:
+2055            expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function()))
+2056
+2057        if not self._match(TokenType.FOR):
+2058            self.raise_error("Expecting FOR")
+2059
+2060        value = self._parse_column()
+2061
+2062        if not self._match(TokenType.IN):
+2063            self.raise_error("Expecting IN")
+2064
+2065        field = self._parse_in(value)
+2066
+2067        self._match_r_paren()
+2068
+2069        return self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot)
+2070
+2071    def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Expression]:
+2072        if not skip_where_token and not self._match(TokenType.WHERE):
+2073            return None
+2074
+2075        return self.expression(
+2076            exp.Where, comments=self._prev_comments, this=self._parse_conjunction()
+2077        )
+2078
    def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse a GROUP BY clause, including GROUPING SETS, CUBE and ROLLUP.

        Args:
            skip_group_by_token: set when GROUP BY was already consumed upstream.

        Returns:
            An exp.Group node, or None when there is no GROUP BY here.
        """
        if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
            return None

        expressions = self._parse_csv(self._parse_conjunction)
        grouping_sets = self._parse_grouping_sets()

        # CUBE/ROLLUP come in two spellings: `WITH CUBE` (no argument list) and
        # `CUBE (a, b)`. When WITH was matched, cube/rollup are stored as True
        # instead of a column list.
        self._match(TokenType.COMMA)
        with_ = self._match(TokenType.WITH)
        cube = self._match(TokenType.CUBE) and (
            with_ or self._parse_wrapped_csv(self._parse_column)
        )

        self._match(TokenType.COMMA)
        rollup = self._match(TokenType.ROLLUP) and (
            with_ or self._parse_wrapped_csv(self._parse_column)
        )

        return self.expression(
            exp.Group,
            expressions=expressions,
            grouping_sets=grouping_sets,
            cube=cube,
            rollup=rollup,
        )
+2104
+2105    def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+2106        if not self._match(TokenType.GROUPING_SETS):
+2107            return None
+2108
+2109        return self._parse_wrapped_csv(self._parse_grouping_set)
+2110
+2111    def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
+2112        if self._match(TokenType.L_PAREN):
+2113            grouping_set = self._parse_csv(self._parse_column)
+2114            self._match_r_paren()
+2115            return self.expression(exp.Tuple, expressions=grouping_set)
+2116
+2117        return self._parse_column()
+2118
+2119    def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Expression]:
+2120        if not skip_having_token and not self._match(TokenType.HAVING):
+2121            return None
+2122        return self.expression(exp.Having, this=self._parse_conjunction())
+2123
+2124    def _parse_qualify(self) -> t.Optional[exp.Expression]:
+2125        if not self._match(TokenType.QUALIFY):
+2126            return None
+2127        return self.expression(exp.Qualify, this=self._parse_conjunction())
+2128
+2129    def _parse_order(
+2130        self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
+2131    ) -> t.Optional[exp.Expression]:
+2132        if not skip_order_token and not self._match(TokenType.ORDER_BY):
+2133            return this
+2134
+2135        return self.expression(
+2136            exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
+2137        )
+2138
+2139    def _parse_sort(
+2140        self, token_type: TokenType, exp_class: t.Type[exp.Expression]
+2141    ) -> t.Optional[exp.Expression]:
+2142        if not self._match(token_type):
+2143            return None
+2144        return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
+2145
    def _parse_ordered(self) -> exp.Expression:
        """Parse one ORDER BY key with ASC/DESC and NULLS FIRST/LAST modifiers.

        When nulls are not ordered explicitly, `nulls_first` is derived from
        the dialect's `null_ordering` setting so generated SQL reproduces the
        source dialect's default null placement.
        """
        this = self._parse_conjunction()
        self._match(TokenType.ASC)
        is_desc = self._match(TokenType.DESC)
        is_nulls_first = self._match(TokenType.NULLS_FIRST)
        is_nulls_last = self._match(TokenType.NULLS_LAST)
        desc = is_desc or False
        asc = not desc
        nulls_first = is_nulls_first or False
        explicitly_null_ordered = is_nulls_first or is_nulls_last
        # Infer nulls-first only when the query didn't say, and the dialect's
        # default ("nulls are small" vs. not) would put nulls first for this
        # direction -- unless the dialect always orders nulls last.
        if (
            not explicitly_null_ordered
            and (
                (asc and self.null_ordering == "nulls_are_small")
                or (desc and self.null_ordering != "nulls_are_small")
            )
            and self.null_ordering != "nulls_are_last"
        ):
            nulls_first = True

        return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first)
+2167
    def _parse_limit(
        self, this: t.Optional[exp.Expression] = None, top: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse a LIMIT (or TOP) clause, or an ANSI FETCH clause.

        Args:
            this: the expression the limit applies to.
            top: parse TOP instead of LIMIT; TOP only accepts a plain number.

        Returns:
            An exp.Limit or exp.Fetch node, or `this` unchanged when neither
            keyword follows.
        """
        if self._match(TokenType.TOP if top else TokenType.LIMIT):
            # The count may be parenthesized, e.g. TOP (n); consume the paren pair.
            limit_paren = self._match(TokenType.L_PAREN)
            limit_exp = self.expression(
                exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term()
            )

            if limit_paren:
                self._match_r_paren()

            return limit_exp

        if self._match(TokenType.FETCH):
            # FETCH {FIRST | NEXT} <count> {ROW | ROWS} ONLY; direction defaults
            # to FIRST when neither keyword is present.
            direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
            direction = self._prev.text if direction else "FIRST"
            count = self._parse_number()
            self._match_set((TokenType.ROW, TokenType.ROWS))
            self._match(TokenType.ONLY)
            return self.expression(exp.Fetch, direction=direction, count=count)

        return this
+2191
+2192    def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
+2193        if not self._match_set((TokenType.OFFSET, TokenType.COMMA)):
+2194            return this
+2195
+2196        count = self._parse_number()
+2197        self._match_set((TokenType.ROW, TokenType.ROWS))
+2198        return self.expression(exp.Offset, this=this, expression=count)
+2199
+2200    def _parse_lock(self) -> t.Optional[exp.Expression]:
+2201        if self._match_text_seq("FOR", "UPDATE"):
+2202            return self.expression(exp.Lock, update=True)
+2203        if self._match_text_seq("FOR", "SHARE"):
+2204            return self.expression(exp.Lock, update=False)
+2205
+2206        return None
+2207
+2208    def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2209        if not self._match_set(self.SET_OPERATIONS):
+2210            return this
+2211
+2212        token_type = self._prev.token_type
+2213
+2214        if token_type == TokenType.UNION:
+2215            expression = exp.Union
+2216        elif token_type == TokenType.EXCEPT:
+2217            expression = exp.Except
+2218        else:
+2219            expression = exp.Intersect
+2220
+2221        return self.expression(
+2222            expression,
+2223            this=this,
+2224            distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL),
+2225            expression=self._parse_set_operations(self._parse_select(nested=True)),
+2226        )
+2227
+2228    def _parse_expression(self) -> t.Optional[exp.Expression]:
+2229        return self._parse_alias(self._parse_conjunction())
+2230
+2231    def _parse_conjunction(self) -> t.Optional[exp.Expression]:
+2232        return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
+2233
+2234    def _parse_equality(self) -> t.Optional[exp.Expression]:
+2235        return self._parse_tokens(self._parse_comparison, self.EQUALITY)
+2236
+2237    def _parse_comparison(self) -> t.Optional[exp.Expression]:
+2238        return self._parse_tokens(self._parse_range, self.COMPARISON)
+2239
    def _parse_range(self) -> t.Optional[exp.Expression]:
        """Parse range-style predicates (BETWEEN, LIKE, ISNULL/NOTNULL, IS, ...).

        A leading NOT is captured first and wrapped around the parsed predicate,
        so e.g. `x NOT BETWEEN a AND b` becomes Not(Between(...)).
        """
        this = self._parse_bitwise()
        negate = self._match(TokenType.NOT)

        if self._match_set(self.RANGE_PARSERS):
            this = self.RANGE_PARSERS[self._prev.token_type](self, this)
        elif self._match(TokenType.ISNULL):
            this = self.expression(exp.Is, this=this, expression=exp.Null())

        # Postgres supports ISNULL and NOTNULL for conditions.
        # https://blog.andreiavram.ro/postgresql-null-composite-type/
        if self._match(TokenType.NOTNULL):
            this = self.expression(exp.Is, this=this, expression=exp.Null())
            this = self.expression(exp.Not, this=this)

        if negate:
            this = self.expression(exp.Not, this=this)

        if self._match(TokenType.IS):
            this = self._parse_is(this)

        return this
+2262
+2263    def _parse_is(self, this: t.Optional[exp.Expression]) -> exp.Expression:
+2264        negate = self._match(TokenType.NOT)
+2265        if self._match(TokenType.DISTINCT_FROM):
+2266            klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
+2267            return self.expression(klass, this=this, expression=self._parse_expression())
+2268
+2269        this = self.expression(
+2270            exp.Is,
+2271            this=this,
+2272            expression=self._parse_null() or self._parse_boolean(),
+2273        )
+2274        return self.expression(exp.Not, this=this) if negate else this
+2275
+2276    def _parse_in(self, this: t.Optional[exp.Expression]) -> exp.Expression:
+2277        unnest = self._parse_unnest()
+2278        if unnest:
+2279            this = self.expression(exp.In, this=this, unnest=unnest)
+2280        elif self._match(TokenType.L_PAREN):
+2281            expressions = self._parse_csv(self._parse_select_or_expression)
+2282
+2283            if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
+2284                this = self.expression(exp.In, this=this, query=expressions[0])
+2285            else:
+2286                this = self.expression(exp.In, this=this, expressions=expressions)
+2287
+2288            self._match_r_paren()
+2289        else:
+2290            this = self.expression(exp.In, this=this, field=self._parse_field())
+2291
+2292        return this
+2293
+2294    def _parse_between(self, this: exp.Expression) -> exp.Expression:
+2295        low = self._parse_bitwise()
+2296        self._match(TokenType.AND)
+2297        high = self._parse_bitwise()
+2298        return self.expression(exp.Between, this=this, low=low, high=high)
+2299
+2300    def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2301        if not self._match(TokenType.ESCAPE):
+2302            return this
+2303        return self.expression(exp.Escape, this=this, expression=self._parse_string())
+2304
+2305    def _parse_bitwise(self) -> t.Optional[exp.Expression]:
+2306        this = self._parse_term()
+2307
+2308        while True:
+2309            if self._match_set(self.BITWISE):
+2310                this = self.expression(
+2311                    self.BITWISE[self._prev.token_type],
+2312                    this=this,
+2313                    expression=self._parse_term(),
+2314                )
+2315            elif self._match_pair(TokenType.LT, TokenType.LT):
+2316                this = self.expression(
+2317                    exp.BitwiseLeftShift, this=this, expression=self._parse_term()
+2318                )
+2319            elif self._match_pair(TokenType.GT, TokenType.GT):
+2320                this = self.expression(
+2321                    exp.BitwiseRightShift, this=this, expression=self._parse_term()
+2322                )
+2323            else:
+2324                break
+2325
+2326        return this
+2327
+2328    def _parse_term(self) -> t.Optional[exp.Expression]:
+2329        return self._parse_tokens(self._parse_factor, self.TERM)
+2330
+2331    def _parse_factor(self) -> t.Optional[exp.Expression]:
+2332        return self._parse_tokens(self._parse_unary, self.FACTOR)
+2333
+2334    def _parse_unary(self) -> t.Optional[exp.Expression]:
+2335        if self._match_set(self.UNARY_PARSERS):
+2336            return self.UNARY_PARSERS[self._prev.token_type](self)
+2337        return self._parse_at_time_zone(self._parse_type())
+2338
    def _parse_type(self) -> t.Optional[exp.Expression]:
        """Parse a typed expression: an INTERVAL, a cast like `DATE 'x'`, or a
        plain column expression.

        Backtracks when what looked like a type turns out to be an identifier
        (a bare type name with nothing following it).
        """
        if self._match(TokenType.INTERVAL):
            return self.expression(exp.Interval, this=self._parse_term(), unit=self._parse_var())

        index = self._index
        type_token = self._parse_types(check_func=True)
        this = self._parse_column()

        if type_token:
            if this and not isinstance(this, exp.Star):
                # `<type> <expr>` is shorthand for a cast, e.g. DATE '2020-01-01'.
                return self.expression(exp.Cast, this=this, to=type_token)
            if not type_token.args.get("expressions"):
                # Bare parameterless type with nothing after it: re-read it as a column.
                self._retreat(index)
                return self._parse_column()
            return type_token

        return this
+2356
    def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
        """Try to parse a (possibly nested or parameterized) data type.

        Args:
            check_func: when True, back off unless the parenthesized "type" is
                immediately followed by a string literal -- otherwise it is more
                likely a function call than a type reference.

        Returns:
            An exp.DataType (or PseudoType/Interval) node, or None after
            rewinding the token stream if no type could be parsed here.
        """
        index = self._index

        if not self._match_set(self.TYPE_TOKENS):
            return None

        type_token = self._prev.token_type

        if type_token == TokenType.PSEUDO_TYPE:
            return self.expression(exp.PseudoType, this=self._prev.text)

        nested = type_token in self.NESTED_TYPE_TOKENS
        is_struct = type_token == TokenType.STRUCT
        expressions = None
        maybe_func = False

        if self._match(TokenType.L_PAREN):
            if is_struct:
                expressions = self._parse_csv(self._parse_struct_kwargs)
            elif nested:
                expressions = self._parse_csv(self._parse_types)
            else:
                expressions = self._parse_csv(self._parse_conjunction)

            if not expressions:
                # Empty parens: this was not a type after all; rewind fully.
                self._retreat(index)
                return None

            self._match_r_paren()
            # A parenthesized argument list could also be a function call.
            maybe_func = True

        # `<type>[]` (and repeated `[]`) array shorthand, e.g. INT[][].
        if not nested and self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
            this = exp.DataType(
                this=exp.DataType.Type.ARRAY,
                expressions=[exp.DataType.build(type_token.value, expressions=expressions)],
                nested=True,
            )

            while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
                this = exp.DataType(
                    this=exp.DataType.Type.ARRAY,
                    expressions=[this],
                    nested=True,
                )

            return this

        if self._match(TokenType.L_BRACKET):
            # A lone `[` without a closing `]` pair: not a type here; rewind.
            self._retreat(index)
            return None

        # Nested types use angle brackets: ARRAY<INT>, MAP<K, V>, STRUCT<a: INT>.
        values: t.Optional[t.List[t.Optional[exp.Expression]]] = None
        if nested and self._match(TokenType.LT):
            if is_struct:
                expressions = self._parse_csv(self._parse_struct_kwargs)
            else:
                expressions = self._parse_csv(self._parse_types)

            if not self._match(TokenType.GT):
                self.raise_error("Expecting >")

            if self._match_set((TokenType.L_BRACKET, TokenType.L_PAREN)):
                values = self._parse_csv(self._parse_conjunction)
                self._match_set((TokenType.R_BRACKET, TokenType.R_PAREN))

        # Normalize the TIME/TIMESTAMP family with their time-zone variants.
        value: t.Optional[exp.Expression] = None
        if type_token in self.TIMESTAMPS:
            if self._match(TokenType.WITH_TIME_ZONE) or type_token == TokenType.TIMESTAMPTZ:
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
            elif (
                self._match(TokenType.WITH_LOCAL_TIME_ZONE) or type_token == TokenType.TIMESTAMPLTZ
            ):
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
            elif self._match(TokenType.WITHOUT_TIME_ZONE):
                if type_token == TokenType.TIME:
                    value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions)
                else:
                    value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)

            # An explicit time-zone qualifier rules out the function-call reading.
            maybe_func = maybe_func and value is None

            if value is None:
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)
        elif type_token == TokenType.INTERVAL:
            value = self.expression(exp.Interval, unit=self._parse_var())

        if maybe_func and check_func:
            index2 = self._index
            peek = self._parse_string()

            if not peek:
                # Not followed by a string literal: rewind fully so the caller
                # can parse this as a function call instead of a type.
                self._retreat(index)
                return None

            self._retreat(index2)

        if value:
            return value

        return exp.DataType(
            this=exp.DataType.Type[type_token.value.upper()],
            expressions=expressions,
            nested=nested,
            values=values,
        )
+2462
+2463    def _parse_struct_kwargs(self) -> t.Optional[exp.Expression]:
+2464        if self._curr and self._curr.token_type in self.TYPE_TOKENS:
+2465            return self._parse_types()
+2466
+2467        this = self._parse_id_var()
+2468        self._match(TokenType.COLON)
+2469        data_type = self._parse_types()
+2470
+2471        if not data_type:
+2472            return None
+2473        return self.expression(exp.StructKwarg, this=this, expression=data_type)
+2474
+2475    def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2476        if not self._match(TokenType.AT_TIME_ZONE):
+2477            return this
+2478        return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary())
+2479
    def _parse_column(self) -> t.Optional[exp.Expression]:
        """Parse a column reference, including dotted paths, `::` casts,
        bracket subscripts, and dialect-specific column operators.
        """
        this = self._parse_field()
        if isinstance(this, exp.Identifier):
            this = self.expression(exp.Column, this=this)
        elif not this:
            return self._parse_bracket(this)
        this = self._parse_bracket(this)

        while self._match_set(self.COLUMN_OPERATORS):
            op_token = self._prev.token_type
            op = self.COLUMN_OPERATORS.get(op_token)

            if op_token == TokenType.DCOLON:
                # `expr::type` cast syntax; the operand must be a type.
                field = self._parse_types()
                if not field:
                    self.raise_error("Expected type")
            elif op:
                # Operator with a literal right-hand side: take the next
                # token's text verbatim as a number or string literal.
                self._advance()
                value = self._prev.text
                field = (
                    exp.Literal.number(value)
                    if self._prev.token_type == TokenType.NUMBER
                    else exp.Literal.string(value)
                )
            else:
                field = self._parse_star() or self._parse_function() or self._parse_id_var()

            if isinstance(field, exp.Func):
                # bigquery allows function calls like x.y.count(...)
                # SAFE.SUBSTR(...)
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
                this = self._replace_columns_with_dots(this)

            if op:
                this = op(self, this, field)
            elif isinstance(this, exp.Column) and not this.table:
                # `a.b` where `a` was read as a column: re-root it as table.column.
                this = self.expression(exp.Column, this=field, table=this.this)
            else:
                this = self.expression(exp.Dot, this=this, expression=field)
            this = self._parse_bracket(this)

        return this
+2522
    def _parse_primary(self) -> t.Optional[exp.Expression]:
        """Parse a primary expression: a literal, `.N` number shorthand, or a
        parenthesized expression / subquery / tuple.
        """
        if self._match_set(self.PRIMARY_PARSERS):
            token_type = self._prev.token_type
            primary = self.PRIMARY_PARSERS[token_type](self, self._prev)

            if token_type == TokenType.STRING:
                # Adjacent string literals are folded into a single exp.Concat.
                expressions = [primary]
                while self._match(TokenType.STRING):
                    expressions.append(exp.Literal.string(self._prev.text))
                if len(expressions) > 1:
                    return self.expression(exp.Concat, expressions=expressions)
            return primary

        if self._match_pair(TokenType.DOT, TokenType.NUMBER):
            # `.5`-style decimal without a leading zero.
            return exp.Literal.number(f"0.{self._prev.text}")

        if self._match(TokenType.L_PAREN):
            comments = self._prev_comments
            query = self._parse_select()

            if query:
                expressions = [query]
            else:
                expressions = self._parse_csv(
                    lambda: self._parse_alias(self._parse_conjunction(), explicit=True)
                )

            this = seq_get(expressions, 0)
            self._parse_query_modifiers(this)
            self._match_r_paren()

            if isinstance(this, exp.Subqueryable):
                # A query in parens is a subquery and may be chained with set ops.
                this = self._parse_set_operations(
                    self._parse_subquery(this=this, parse_alias=False)
                )
            elif len(expressions) > 1:
                # Several comma-separated expressions in parens form a tuple.
                this = self.expression(exp.Tuple, expressions=expressions)
            else:
                this = self.expression(exp.Paren, this=this)

            if this and comments:
                this.comments = comments

            return this

        return None
+2569
+2570    def _parse_field(self, any_token: bool = False) -> t.Optional[exp.Expression]:
+2571        return self._parse_primary() or self._parse_function() or self._parse_id_var(any_token)
+2572
    def _parse_function(
        self, functions: t.Optional[t.Dict[str, t.Callable]] = None
    ) -> t.Optional[exp.Expression]:
        """Parse a function call: no-paren functions, dedicated function
        parsers, subquery predicates, known builders, or anonymous calls.

        Args:
            functions: override for the name -> builder mapping; defaults to
                self.FUNCTIONS.

        Returns:
            The parsed function expression (possibly wrapped in a window
            specification), or None if the current tokens don't start one.
        """
        if not self._curr:
            return None

        token_type = self._curr.token_type

        if self._match_set(self.NO_PAREN_FUNCTION_PARSERS):
            return self.NO_PAREN_FUNCTION_PARSERS[token_type](self)

        if not self._next or self._next.token_type != TokenType.L_PAREN:
            # No argument list follows: only functions callable without
            # parentheses qualify here.
            if token_type in self.NO_PAREN_FUNCTIONS:
                self._advance()
                return self.expression(self.NO_PAREN_FUNCTIONS[token_type])

            return None

        if token_type not in self.FUNC_TOKENS:
            return None

        this = self._curr.text
        upper = this.upper()
        self._advance(2)  # consume the function name and the opening paren

        parser = self.FUNCTION_PARSERS.get(upper)

        if parser:
            this = parser(self)
        else:
            subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type)

            if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH):
                # e.g. EXISTS(SELECT ...), where the argument is a full query.
                this = self.expression(subquery_predicate, this=self._parse_select())
                self._match_r_paren()
                return this

            if functions is None:
                functions = self.FUNCTIONS

            function = functions.get(upper)
            args = self._parse_csv(self._parse_lambda)

            if function:
                # Clickhouse supports function calls like foo(x, y)(z), so for these we need to also parse the
                # second parameter list (i.e. "(z)") and the corresponding function will receive both arg lists.
                if count_params(function) == 2:
                    params = None
                    if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
                        params = self._parse_csv(self._parse_lambda)

                    this = function(args, params)
                else:
                    this = function(args)

                self.validate_expression(this, args)
            else:
                # Unknown name: preserve the call as an anonymous function.
                this = self.expression(exp.Anonymous, this=this, expressions=args)

        self._match_r_paren(this)
        return self._parse_window(this)
+2634
+2635    def _parse_user_defined_function(
+2636        self, kind: t.Optional[TokenType] = None
+2637    ) -> t.Optional[exp.Expression]:
+2638        this = self._parse_id_var()
+2639
+2640        while self._match(TokenType.DOT):
+2641            this = self.expression(exp.Dot, this=this, expression=self._parse_id_var())
+2642
+2643        if not self._match(TokenType.L_PAREN):
+2644            return this
+2645
+2646        expressions = self._parse_csv(self._parse_udf_kwarg)
+2647        self._match_r_paren()
+2648        return self.expression(
+2649            exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
+2650        )
+2651
+2652    def _parse_introducer(self, token: Token) -> t.Optional[exp.Expression]:
+2653        literal = self._parse_primary()
+2654        if literal:
+2655            return self.expression(exp.Introducer, this=token.text, expression=literal)
+2656
+2657        return self.expression(exp.Identifier, this=token.text)
+2658
+2659    def _parse_national(self, token: Token) -> exp.Expression:
+2660        return self.expression(exp.National, this=exp.Literal.string(token.text))
+2661
+2662    def _parse_session_parameter(self) -> exp.Expression:
+2663        kind = None
+2664        this = self._parse_id_var() or self._parse_primary()
+2665
+2666        if this and self._match(TokenType.DOT):
+2667            kind = this.name
+2668            this = self._parse_var() or self._parse_primary()
+2669
+2670        return self.expression(exp.SessionParameter, this=this, kind=kind)
+2671
+2672    def _parse_udf_kwarg(self) -> t.Optional[exp.Expression]:
+2673        this = self._parse_id_var()
+2674        kind = self._parse_types()
+2675
+2676        if not kind:
+2677            return this
+2678
+2679        return self.expression(exp.UserDefinedFunctionKwarg, this=this, kind=kind)
+2680
    def _parse_lambda(self) -> t.Optional[exp.Expression]:
        """Parse a lambda (e.g. `x -> x + 1` or `(x, y) -> ...`); when no
        lambda operator follows, rewind and parse a regular (possibly
        DISTINCT) expression with IGNORE/RESPECT NULLS and ORDER/LIMIT tails.
        """
        index = self._index

        if self._match(TokenType.L_PAREN):
            expressions = self._parse_csv(self._parse_id_var)

            if not self._match(TokenType.R_PAREN):
                # Not a parenthesized parameter list after all; rewind.
                self._retreat(index)
        else:
            expressions = [self._parse_id_var()]

        if self._match_set(self.LAMBDAS):
            return self.LAMBDAS[self._prev.token_type](self, expressions)

        # No lambda operator: undo the speculative parameter parse above.
        self._retreat(index)

        this: t.Optional[exp.Expression]

        if self._match(TokenType.DISTINCT):
            this = self.expression(
                exp.Distinct, expressions=self._parse_csv(self._parse_conjunction)
            )
        else:
            this = self._parse_select_or_expression()

        if self._match(TokenType.IGNORE_NULLS):
            this = self.expression(exp.IgnoreNulls, this=this)
        else:
            # RESPECT NULLS is the default; consume it without wrapping.
            self._match(TokenType.RESPECT_NULLS)

        return self._parse_limit(self._parse_order(this))
+2712
+2713    def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
+2714        index = self._index
+2715        if not self._match(TokenType.L_PAREN) or self._match(TokenType.SELECT):
+2716            self._retreat(index)
+2717            return this
+2718
+2719        args = self._parse_csv(
+2720            lambda: self._parse_constraint()
+2721            or self._parse_column_def(self._parse_field(any_token=True))
+2722        )
+2723        self._match_r_paren()
+2724        return self.expression(exp.Schema, this=this, expressions=args)
+2725
+2726    def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2727        kind = self._parse_types()
+2728
+2729        constraints = []
+2730        while True:
+2731            constraint = self._parse_column_constraint()
+2732            if not constraint:
+2733                break
+2734            constraints.append(constraint)
+2735
+2736        if not kind and not constraints:
+2737            return this
+2738
+2739        return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
+2740
    def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
        """Parse a single column-level constraint, or return None/`this` when none matches.

        Handles REFERENCES, optional CONSTRAINT naming, auto-increment/identity,
        CHECK, COLLATE, ENCODE, DEFAULT, NOT NULL / NULL, comments, PRIMARY KEY,
        UNIQUE and GENERATED [ALWAYS | BY DEFAULT] AS IDENTITY clauses.
        """
        # A bare REFERENCES clause is returned as-is, without a ColumnConstraint wrapper.
        this = self._parse_references()

        if this:
            return this

        # Optional "CONSTRAINT <name>" prefix naming the constraint.
        if self._match(TokenType.CONSTRAINT):
            this = self._parse_id_var()

        kind: exp.Expression

        if self._match_set((TokenType.AUTO_INCREMENT, TokenType.IDENTITY)):
            start = None
            increment = None

            # Either IDENTITY(start, increment) or "START <n> INCREMENT <n>" syntax.
            if self._match(TokenType.L_PAREN, advance=False):
                args = self._parse_wrapped_csv(self._parse_bitwise)
                start = seq_get(args, 0)
                increment = seq_get(args, 1)
            elif self._match_text_seq("START"):
                start = self._parse_bitwise()
                self._match_text_seq("INCREMENT")
                increment = self._parse_bitwise()

            if start and increment:
                kind = exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
            else:
                kind = exp.AutoIncrementColumnConstraint()
        elif self._match(TokenType.CHECK):
            constraint = self._parse_wrapped(self._parse_conjunction)
            kind = self.expression(exp.CheckColumnConstraint, this=constraint)
        elif self._match(TokenType.COLLATE):
            kind = self.expression(exp.CollateColumnConstraint, this=self._parse_var())
        elif self._match(TokenType.ENCODE):
            kind = self.expression(exp.EncodeColumnConstraint, this=self._parse_var())
        elif self._match(TokenType.DEFAULT):
            kind = self.expression(exp.DefaultColumnConstraint, this=self._parse_bitwise())
        elif self._match_pair(TokenType.NOT, TokenType.NULL):
            kind = exp.NotNullColumnConstraint()
        elif self._match(TokenType.NULL):
            # Explicit NULL is modeled as a NotNull constraint with allow_null=True.
            kind = exp.NotNullColumnConstraint(allow_null=True)
        elif self._match(TokenType.SCHEMA_COMMENT):
            kind = self.expression(exp.CommentColumnConstraint, this=self._parse_string())
        elif self._match(TokenType.PRIMARY_KEY):
            desc = None
            if self._match(TokenType.ASC) or self._match(TokenType.DESC):
                desc = self._prev.token_type == TokenType.DESC
            kind = exp.PrimaryKeyColumnConstraint(desc=desc)
        elif self._match(TokenType.UNIQUE):
            kind = exp.UniqueColumnConstraint()
        elif self._match(TokenType.GENERATED):
            if self._match(TokenType.BY_DEFAULT):
                kind = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=False)
            else:
                self._match(TokenType.ALWAYS)
                kind = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)
            # Consume the "AS IDENTITY" that follows GENERATED ALWAYS / BY DEFAULT.
            self._match_pair(TokenType.ALIAS, TokenType.IDENTITY)

            if self._match(TokenType.L_PAREN):
                if self._match_text_seq("START", "WITH"):
                    kind.set("start", self._parse_bitwise())
                if self._match_text_seq("INCREMENT", "BY"):
                    kind.set("increment", self._parse_bitwise())

                self._match_r_paren()
        else:
            return this

        return self.expression(exp.ColumnConstraint, this=this, kind=kind)
+2810
    def _parse_constraint(self) -> t.Optional[exp.Expression]:
        """Parse a table constraint, either named (CONSTRAINT <name> ...) or unnamed."""
        if not self._match(TokenType.CONSTRAINT):
            return self._parse_unnamed_constraint()

        this = self._parse_id_var()
        expressions = []

        # A named constraint may bundle several constraint bodies / function calls.
        while True:
            constraint = self._parse_unnamed_constraint() or self._parse_function()
            if not constraint:
                break
            expressions.append(constraint)

        return self.expression(exp.Constraint, this=this, expressions=expressions)
+2825
+2826    def _parse_unnamed_constraint(self) -> t.Optional[exp.Expression]:
+2827        if not self._match_set(self.CONSTRAINT_PARSERS):
+2828            return None
+2829        return self.CONSTRAINT_PARSERS[self._prev.token_type](self)
+2830
+2831    def _parse_unique(self) -> exp.Expression:
+2832        return self.expression(exp.Unique, expressions=self._parse_wrapped_id_vars())
+2833
    def _parse_key_constraint_options(self) -> t.List[str]:
        """Collect trailing key-constraint options (ON DELETE/UPDATE actions,
        NOT ENFORCED, DEFERRABLE, INITIALLY DEFERRED, NORELY, MATCH FULL)
        as plain strings, in source order."""
        options = []
        while True:
            if not self._curr:
                break

            if self._match(TokenType.ON):
                action = None
                # The token after ON (e.g. DELETE / UPDATE) is taken verbatim.
                on = self._advance_any() and self._prev.text

                if self._match(TokenType.NO_ACTION):
                    action = "NO ACTION"
                elif self._match(TokenType.CASCADE):
                    action = "CASCADE"
                elif self._match_pair(TokenType.SET, TokenType.NULL):
                    action = "SET NULL"
                elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
                    action = "SET DEFAULT"
                else:
                    self.raise_error("Invalid key constraint")

                options.append(f"ON {on} {action}")
            elif self._match_text_seq("NOT", "ENFORCED"):
                options.append("NOT ENFORCED")
            elif self._match_text_seq("DEFERRABLE"):
                options.append("DEFERRABLE")
            elif self._match_text_seq("INITIALLY", "DEFERRED"):
                options.append("INITIALLY DEFERRED")
            elif self._match_text_seq("NORELY"):
                options.append("NORELY")
            elif self._match_text_seq("MATCH", "FULL"):
                options.append("MATCH FULL")
            else:
                # Stop at the first token that is not a recognized option.
                break

        return options
+2870
    def _parse_references(self) -> t.Optional[exp.Expression]:
        """Parse a REFERENCES <table> [(cols)] [options] clause into a Reference node."""
        if not self._match(TokenType.REFERENCES):
            return None

        expressions = None
        this = self._parse_id_var()

        # Optional referenced-column list; peek without consuming the paren.
        if self._match(TokenType.L_PAREN, advance=False):
            expressions = self._parse_wrapped_id_vars()

        options = self._parse_key_constraint_options()
        return self.expression(exp.Reference, this=this, expressions=expressions, options=options)
+2883
    def _parse_foreign_key(self) -> exp.Expression:
        """Parse a FOREIGN KEY (cols) REFERENCES ... [ON DELETE/UPDATE <action>] constraint."""
        expressions = self._parse_wrapped_id_vars()
        reference = self._parse_references()
        options = {}

        while self._match(TokenType.ON):
            if not self._match_set((TokenType.DELETE, TokenType.UPDATE)):
                self.raise_error("Expected DELETE or UPDATE")

            # "delete" or "update" -- used as the kwarg name on ForeignKey.
            kind = self._prev.text.lower()

            if self._match(TokenType.NO_ACTION):
                action = "NO ACTION"
            elif self._match(TokenType.SET):
                # NOTE(review): if neither NULL nor DEFAULT follows SET, _prev is
                # still the SET token, yielding "SET SET" -- presumably unreachable
                # for valid SQL; confirm before relying on it.
                self._match_set((TokenType.NULL, TokenType.DEFAULT))
                action = "SET " + self._prev.text.upper()
            else:
                # Any other single token (e.g. CASCADE, RESTRICT) is taken verbatim.
                self._advance()
                action = self._prev.text.upper()

            options[kind] = action

        return self.expression(
            exp.ForeignKey, expressions=expressions, reference=reference, **options  # type: ignore
        )
+2909
+2910    def _parse_primary_key(self) -> exp.Expression:
+2911        expressions = self._parse_wrapped_id_vars()
+2912        options = self._parse_key_constraint_options()
+2913        return self.expression(exp.PrimaryKey, expressions=expressions, options=options)
+2914
    def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse a bracket/brace suffix: array literals, struct literals, slices and
        subscript access. Recurses to support chained brackets like a[1][2]."""
        if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)):
            return this

        bracket_kind = self._prev.token_type
        expressions: t.List[t.Optional[exp.Expression]]

        # A leading ":" means a slice with no start, e.g. x[:3].
        if self._match(TokenType.COLON):
            expressions = [self.expression(exp.Slice, expression=self._parse_conjunction())]
        else:
            expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction()))

        # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
        if bracket_kind == TokenType.L_BRACE:
            this = self.expression(exp.Struct, expressions=expressions)
        elif not this or this.name.upper() == "ARRAY":
            this = self.expression(exp.Array, expressions=expressions)
        else:
            # Subscript: normalize indices to the dialect's base offset.
            expressions = apply_index_offset(expressions, -self.index_offset)
            this = self.expression(exp.Bracket, this=this, expressions=expressions)

        if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET:
            self.raise_error("Expected ]")
        elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE:
            self.raise_error("Expected }")

        this.comments = self._prev_comments
        return self._parse_bracket(this)
+2943
+2944    def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2945        if self._match(TokenType.COLON):
+2946            return self.expression(exp.Slice, this=this, expression=self._parse_conjunction())
+2947        return this
+2948
    def _parse_case(self) -> t.Optional[exp.Expression]:
        """Parse a CASE [operand] WHEN ... THEN ... [ELSE ...] END expression.

        Raises a parse error when the closing END is missing. The result is
        passed through _parse_window to allow a trailing OVER clause.
        """
        ifs = []
        default = None

        # Optional operand for the "simple CASE" form (CASE x WHEN ...).
        expression = self._parse_conjunction()

        while self._match(TokenType.WHEN):
            this = self._parse_conjunction()
            self._match(TokenType.THEN)
            then = self._parse_conjunction()
            ifs.append(self.expression(exp.If, this=this, true=then))

        if self._match(TokenType.ELSE):
            default = self._parse_conjunction()

        if not self._match(TokenType.END):
            self.raise_error("Expected END after CASE", self._prev)

        return self._parse_window(
            self.expression(exp.Case, this=expression, ifs=ifs, default=default)
        )
+2970
    def _parse_if(self) -> t.Optional[exp.Expression]:
        """Parse IF in both forms: function-call IF(cond, a, b) and
        statement-like IF cond THEN a [ELSE b] END."""
        if self._match(TokenType.L_PAREN):
            args = self._parse_csv(self._parse_conjunction)
            this = exp.If.from_arg_list(args)
            self.validate_expression(this, args)
            self._match_r_paren()
        else:
            condition = self._parse_conjunction()
            self._match(TokenType.THEN)
            true = self._parse_conjunction()
            false = self._parse_conjunction() if self._match(TokenType.ELSE) else None
            self._match(TokenType.END)
            this = self.expression(exp.If, this=condition, true=true, false=false)

        return self._parse_window(this)
+2986
    def _parse_extract(self) -> exp.Expression:
        """Parse EXTRACT(<unit> FROM <expr>), also accepting a comma separator."""
        this = self._parse_function() or self._parse_var() or self._parse_type()

        if self._match(TokenType.FROM):
            return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())

        # raise_error raises, so falling through here implies a comma was matched.
        if not self._match(TokenType.COMMA):
            self.raise_error("Expected FROM or comma after EXTRACT", self._prev)

        return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
+2997
    def _parse_cast(self, strict: bool) -> exp.Expression:
        """Parse CAST(<expr> AS <type>), producing TryCast instead when not strict.

        Args:
            strict: when False the result is an exp.TryCast node.
        """
        this = self._parse_conjunction()

        if not self._match(TokenType.ALIAS):
            self.raise_error("Expected AS after CAST")

        to = self._parse_types()

        if not to:
            self.raise_error("Expected TYPE after CAST")
        elif to.this == exp.DataType.Type.CHAR:
            # e.g. CAST(x AS CHAR CHARACTER SET utf8)
            if self._match(TokenType.CHARACTER_SET):
                to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())

        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+3013
    def _parse_string_agg(self) -> exp.Expression:
        """Parse STRING_AGG / GROUP_CONCAT-style calls into an exp.GroupConcat,
        covering the Postgres inline-ORDER-BY and WITHIN GROUP variants."""
        expression: t.Optional[exp.Expression]

        if self._match(TokenType.DISTINCT):
            args = self._parse_csv(self._parse_conjunction)
            expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)])
        else:
            args = self._parse_csv(self._parse_conjunction)
            expression = seq_get(args, 0)

        index = self._index
        if not self._match(TokenType.R_PAREN):
            # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]])
            order = self._parse_order(this=expression)
            return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))

        # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]).
        # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that
        # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them.
        if not self._match(TokenType.WITHIN_GROUP):
            self._retreat(index)
            this = exp.GroupConcat.from_arg_list(args)
            self.validate_expression(this, args)
            return this

        self._match_l_paren()  # The corresponding match_r_paren will be called in parse_function (caller)
        order = self._parse_order(this=expression)
        return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
+3042
    def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
        """Parse CONVERT(x USING charset) or CONVERT(x, type) into a (Try)Cast node.

        Args:
            strict: when False the result is an exp.TryCast node.
        """
        to: t.Optional[exp.Expression]
        this = self._parse_column()

        if self._match(TokenType.USING):
            # MySQL form: CONVERT(expr USING charset)
            to = self.expression(exp.CharacterSet, this=self._parse_var())
        elif self._match(TokenType.COMMA):
            to = self._parse_types()
        else:
            to = None

        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+3055
    def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
        """Parse POSITION/LOCATE-style calls into exp.StrPosition.

        Args:
            haystack_first: argument order of the source dialect --
                True means (haystack, needle), False means (needle, haystack).
        """
        args = self._parse_csv(self._parse_bitwise)

        # POSITION(substr IN str) form.
        if self._match(TokenType.IN):
            return self.expression(
                exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0)
            )

        if haystack_first:
            haystack = seq_get(args, 0)
            needle = seq_get(args, 1)
        else:
            needle = seq_get(args, 0)
            haystack = seq_get(args, 1)

        this = exp.StrPosition(this=haystack, substr=needle, position=seq_get(args, 2))

        self.validate_expression(this, args)

        return this
+3076
+3077    def _parse_join_hint(self, func_name: str) -> exp.Expression:
+3078        args = self._parse_csv(self._parse_table)
+3079        return exp.JoinHint(this=func_name.upper(), expressions=args)
+3080
    def _parse_substring(self) -> exp.Expression:
        """Parse SUBSTRING in both comma form and the Postgres FROM/FOR form."""
        # Postgres supports the form: substring(string [from int] [for int])
        # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6

        args = self._parse_csv(self._parse_bitwise)

        # Normalize FROM/FOR operands into positional args (start, length).
        if self._match(TokenType.FROM):
            args.append(self._parse_bitwise())
            if self._match(TokenType.FOR):
                args.append(self._parse_bitwise())

        this = exp.Substring.from_arg_list(args)
        self.validate_expression(this, args)

        return this
+3096
    def _parse_trim(self) -> exp.Expression:
        """Parse TRIM([LEADING|TRAILING|BOTH] [chars FROM] str [COLLATE c])."""
        # https://www.w3resource.com/sql/character-functions/trim.php
        # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html

        position = None
        collation = None

        if self._match_set(self.TRIM_TYPES):
            position = self._prev.text.upper()

        expression = self._parse_term()
        if self._match_set((TokenType.FROM, TokenType.COMMA)):
            # Two-operand form: first term is the characters to trim.
            this = self._parse_term()
        else:
            # One-operand form: the first term was the string itself.
            this = expression
            expression = None

        if self._match(TokenType.COLLATE):
            collation = self._parse_term()

        return self.expression(
            exp.Trim,
            this=this,
            position=position,
            expression=expression,
            collation=collation,
        )
+3124
    def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
        """Parse a WINDOW clause (comma-separated named window definitions), if present."""
        return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window)
+3127
+3128    def _parse_named_window(self) -> t.Optional[exp.Expression]:
+3129        return self._parse_window(self._parse_id_var(), alias=True)
+3130
    def _parse_window(
        self, this: t.Optional[exp.Expression], alias: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse the window-related suffixes of an expression: FILTER, WITHIN GROUP,
        IGNORE/RESPECT NULLS, and the OVER (...) window specification itself.

        Args:
            this: the expression the window attaches to.
            alias: True when parsing a WINDOW-clause definition (name AS (...)),
                in which case AS is consumed instead of requiring OVER.

        Returns `this` unchanged when no window syntax follows.
        """
        if self._match(TokenType.FILTER):
            where = self._parse_wrapped(self._parse_where)
            this = self.expression(exp.Filter, this=this, expression=where)

        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
        if self._match(TokenType.WITHIN_GROUP):
            order = self._parse_wrapped(self._parse_order)
            this = self.expression(exp.WithinGroup, this=this, expression=order)

        # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
        # Some dialects choose to implement and some do not.
        # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html

        # There is some code above in _parse_lambda that handles
        #   SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ...

        # The below changes handle
        #   SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ...

        # Oracle allows both formats
        #   (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html)
        #   and Snowflake chose to do the same for familiarity
        #   https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes
        if self._match(TokenType.IGNORE_NULLS):
            this = self.expression(exp.IgnoreNulls, this=this)
        elif self._match(TokenType.RESPECT_NULLS):
            this = self.expression(exp.RespectNulls, this=this)

        # bigquery select from window x AS (partition by ...)
        if alias:
            self._match(TokenType.ALIAS)
        elif not self._match(TokenType.OVER):
            return this

        # OVER <name> (reference to a named window) rather than OVER (...).
        if not self._match(TokenType.L_PAREN):
            return self.expression(exp.Window, this=this, alias=self._parse_id_var(False))

        window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS)
        partition = self._parse_partition_by()
        order = self._parse_order()
        kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text

        if kind:
            # ROWS/RANGE BETWEEN <start spec> AND <end spec> framing.
            self._match(TokenType.BETWEEN)
            start = self._parse_window_spec()
            self._match(TokenType.AND)
            end = self._parse_window_spec()

            spec = self.expression(
                exp.WindowSpec,
                kind=kind,
                start=start["value"],
                start_side=start["side"],
                end=end["value"],
                end_side=end["side"],
            )
        else:
            spec = None

        self._match_r_paren()

        return self.expression(
            exp.Window,
            this=this,
            partition_by=partition,
            order=order,
            spec=spec,
            alias=window_alias,
        )
+3204
    def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
        """Parse one frame boundary of a window spec.

        Returns a dict with "value" (UNBOUNDED / CURRENT ROW text, or an offset
        expression) and "side" (PRECEDING / FOLLOWING text, or a falsy value).
        """
        self._match(TokenType.BETWEEN)

        return {
            "value": (
                self._match_set((TokenType.UNBOUNDED, TokenType.CURRENT_ROW)) and self._prev.text
            )
            or self._parse_bitwise(),
            "side": self._match_set((TokenType.PRECEDING, TokenType.FOLLOWING)) and self._prev.text,
        }
+3215
    def _parse_alias(
        self, this: t.Optional[exp.Expression], explicit: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse an optional alias (single or parenthesized list) for `this`.

        Args:
            this: the expression being aliased.
            explicit: when True, only accept an alias introduced by AS.
        """
        any_token = self._match(TokenType.ALIAS)

        if explicit and not any_token:
            return this

        # Multi-alias form: expr AS (a, b, c).
        if self._match(TokenType.L_PAREN):
            aliases = self.expression(
                exp.Aliases,
                this=this,
                expressions=self._parse_csv(lambda: self._parse_id_var(any_token)),
            )
            self._match_r_paren(aliases)
            return aliases

        alias = self._parse_id_var(any_token)

        if alias:
            return self.expression(exp.Alias, this=this, alias=alias)

        return this
+3239
    def _parse_id_var(
        self,
        any_token: bool = True,
        tokens: t.Optional[t.Collection[TokenType]] = None,
        prefix_tokens: t.Optional[t.Collection[TokenType]] = None,
    ) -> t.Optional[exp.Expression]:
        """Parse an identifier-like name.

        Args:
            any_token: accept any non-reserved token as the identifier.
            tokens: token types to accept instead of the default ID_VAR_TOKENS.
            prefix_tokens: token types whose text is glued onto the front of
                the identifier (e.g. for dialect-specific prefixes).
        """
        identifier = self._parse_identifier()

        if identifier:
            return identifier

        prefix = ""

        if prefix_tokens:
            while self._match_set(prefix_tokens):
                prefix += self._prev.text

        if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS):
            # A string token means the identifier was quoted with string quotes.
            quoted = self._prev.token_type == TokenType.STRING
            return exp.Identifier(this=prefix + self._prev.text, quoted=quoted)

        return None
+3262
+3263    def _parse_string(self) -> t.Optional[exp.Expression]:
+3264        if self._match(TokenType.STRING):
+3265            return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev)
+3266        return self._parse_placeholder()
+3267
+3268    def _parse_number(self) -> t.Optional[exp.Expression]:
+3269        if self._match(TokenType.NUMBER):
+3270            return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev)
+3271        return self._parse_placeholder()
+3272
+3273    def _parse_identifier(self) -> t.Optional[exp.Expression]:
+3274        if self._match(TokenType.IDENTIFIER):
+3275            return self.expression(exp.Identifier, this=self._prev.text, quoted=True)
+3276        return self._parse_placeholder()
+3277
+3278    def _parse_var(self, any_token: bool = False) -> t.Optional[exp.Expression]:
+3279        if (any_token and self._advance_any()) or self._match(TokenType.VAR):
+3280            return self.expression(exp.Var, this=self._prev.text)
+3281        return self._parse_placeholder()
+3282
+3283    def _advance_any(self) -> t.Optional[Token]:
+3284        if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS:
+3285            self._advance()
+3286            return self._prev
+3287        return None
+3288
+3289    def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
+3290        return self._parse_var() or self._parse_string()
+3291
+3292    def _parse_null(self) -> t.Optional[exp.Expression]:
+3293        if self._match(TokenType.NULL):
+3294            return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
+3295        return None
+3296
+3297    def _parse_boolean(self) -> t.Optional[exp.Expression]:
+3298        if self._match(TokenType.TRUE):
+3299            return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev)
+3300        if self._match(TokenType.FALSE):
+3301            return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev)
+3302        return None
+3303
+3304    def _parse_star(self) -> t.Optional[exp.Expression]:
+3305        if self._match(TokenType.STAR):
+3306            return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
+3307        return None
+3308
    def _parse_placeholder(self) -> t.Optional[exp.Expression]:
        """Parse a placeholder/parameter token (e.g. ?, :name, @var) if one is next.

        If the registered placeholder parser declines, the consumed token is
        pushed back so callers can try other alternatives.
        """
        if self._match_set(self.PLACEHOLDER_PARSERS):
            placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
            if placeholder:
                return placeholder
            # Backtrack: the matched token was not actually a placeholder.
            self._advance(-1)
        return None
+3316
+3317    def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+3318        if not self._match(TokenType.EXCEPT):
+3319            return None
+3320        if self._match(TokenType.L_PAREN, advance=False):
+3321            return self._parse_wrapped_csv(self._parse_column)
+3322        return self._parse_csv(self._parse_column)
+3323
+3324    def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+3325        if not self._match(TokenType.REPLACE):
+3326            return None
+3327        if self._match(TokenType.L_PAREN, advance=False):
+3328            return self._parse_wrapped_csv(self._parse_expression)
+3329        return self._parse_csv(self._parse_expression)
+3330
    def _parse_csv(
        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
    ) -> t.List[t.Optional[exp.Expression]]:
        """Parse a separator-delimited list of items produced by `parse_method`.

        None results are dropped; comments attached to a separator token are
        transferred onto the preceding item.
        """
        parse_result = parse_method()
        items = [parse_result] if parse_result is not None else []

        while self._match(sep):
            # Attach any comment that rode along with the separator to the
            # item that precedes it.
            if parse_result and self._prev_comments:
                parse_result.comments = self._prev_comments

            parse_result = parse_method()
            if parse_result is not None:
                items.append(parse_result)

        return items
+3346
    def _parse_tokens(
        self, parse_method: t.Callable, expressions: t.Dict
    ) -> t.Optional[exp.Expression]:
        """Left-associatively fold binary operators.

        `expressions` maps operator token types to expression classes; operands
        are produced by `parse_method`.
        """
        this = parse_method()

        while self._match_set(expressions):
            this = self.expression(
                expressions[self._prev.token_type],
                this=this,
                comments=self._prev_comments,
                expression=parse_method(),
            )

        return this
+3361
+3362    def _parse_wrapped_id_vars(self) -> t.List[t.Optional[exp.Expression]]:
+3363        return self._parse_wrapped_csv(self._parse_id_var)
+3364
+3365    def _parse_wrapped_csv(
+3366        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
+3367    ) -> t.List[t.Optional[exp.Expression]]:
+3368        return self._parse_wrapped(lambda: self._parse_csv(parse_method, sep=sep))
+3369
+3370    def _parse_wrapped(self, parse_method: t.Callable) -> t.Any:
+3371        self._match_l_paren()
+3372        parse_result = parse_method()
+3373        self._match_r_paren()
+3374        return parse_result
+3375
+3376    def _parse_select_or_expression(self) -> t.Optional[exp.Expression]:
+3377        return self._parse_select() or self._parse_expression()
+3378
+3379    def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
+3380        return self._parse_set_operations(
+3381            self._parse_select(nested=True, parse_subquery_alias=False)
+3382        )
+3383
    def _parse_transaction(self) -> exp.Expression:
        """Parse BEGIN/START [kind] [TRANSACTION|WORK] [mode, ...] into a Transaction node."""
        this = None
        if self._match_texts(self.TRANSACTION_KIND):
            this = self._prev.text

        self._match_texts({"TRANSACTION", "WORK"})

        # Collect comma-separated transaction modes; each mode may be several
        # VAR tokens joined by spaces (e.g. "ISOLATION LEVEL READ COMMITTED").
        modes = []
        while True:
            mode = []
            while self._match(TokenType.VAR):
                mode.append(self._prev.text)

            if mode:
                modes.append(" ".join(mode))
            if not self._match(TokenType.COMMA):
                break

        return self.expression(exp.Transaction, this=this, modes=modes)
+3403
    def _parse_commit_or_rollback(self) -> exp.Expression:
        """Parse COMMIT / ROLLBACK, with optional TO SAVEPOINT and AND [NO] CHAIN.

        The previously-consumed token decides which statement this is. Note
        that `chain` is only attached to Commit and `savepoint` only to
        Rollback, mirroring what each statement supports.
        """
        chain = None
        savepoint = None
        is_rollback = self._prev.token_type == TokenType.ROLLBACK

        self._match_texts({"TRANSACTION", "WORK"})

        if self._match_text_seq("TO"):
            self._match_text_seq("SAVEPOINT")
            savepoint = self._parse_id_var()

        if self._match(TokenType.AND):
            # AND CHAIN -> True, AND NO CHAIN -> False.
            chain = not self._match_text_seq("NO")
            self._match_text_seq("CHAIN")

        if is_rollback:
            return self.expression(exp.Rollback, savepoint=savepoint)
        return self.expression(exp.Commit, chain=chain)
+3422
    def _parse_add_column(self) -> t.Optional[exp.Expression]:
        """Parse one ADD [COLUMN] [IF NOT EXISTS] <column def> of an ALTER TABLE."""
        if not self._match_text_seq("ADD"):
            return None

        self._match(TokenType.COLUMN)
        exists_column = self._parse_exists(not_=True)
        expression = self._parse_column_def(self._parse_field(any_token=True))

        if expression:
            # Record the IF NOT EXISTS flag on the column definition itself.
            expression.set("exists", exists_column)

        return expression
+3435
    def _parse_drop_column(self) -> t.Optional[exp.Expression]:
        """Parse one DROP [COLUMN] action of an ALTER TABLE, if DROP is next."""
        return self._match(TokenType.DROP) and self._parse_drop(default_kind="COLUMN")
+3438
+3439    # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html
+3440    def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.Expression:
+3441        return self.expression(
+3442            exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
+3443        )
+3444
+3445    def _parse_add_constraint(self) -> t.Optional[exp.Expression]:
+3446        this = None
+3447        kind = self._prev.token_type
+3448
+3449        if kind == TokenType.CONSTRAINT:
+3450            this = self._parse_id_var()
+3451
+3452            if self._match(TokenType.CHECK):
+3453                expression = self._parse_wrapped(self._parse_conjunction)
+3454                enforced = self._match_text_seq("ENFORCED")
+3455
+3456                return self.expression(
+3457                    exp.AddConstraint, this=this, expression=expression, enforced=enforced
+3458                )
+3459
+3460        if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY):
+3461            expression = self._parse_foreign_key()
+3462        elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY):
+3463            expression = self._parse_primary_key()
+3464
+3465        return self.expression(exp.AddConstraint, this=this, expression=expression)
+3466
+3467    def _parse_alter(self) -> t.Optional[exp.Expression]:
+3468        if not self._match(TokenType.TABLE):
+3469            return self._parse_as_command(self._prev)
+3470
+3471        exists = self._parse_exists()
+3472        this = self._parse_table(schema=True)
+3473
+3474        actions: t.Optional[exp.Expression | t.List[t.Optional[exp.Expression]]] = None
+3475
+3476        index = self._index
+3477        if self._match(TokenType.DELETE):
+3478            actions = [self.expression(exp.Delete, where=self._parse_where())]
+3479        elif self._match_text_seq("ADD"):
+3480            if self._match_set(self.ADD_CONSTRAINT_TOKENS):
+3481                actions = self._parse_csv(self._parse_add_constraint)
+3482            else:
+3483                self._retreat(index)
+3484                actions = self._parse_csv(self._parse_add_column)
+3485        elif self._match_text_seq("DROP"):
+3486            partition_exists = self._parse_exists()
+3487
+3488            if self._match(TokenType.PARTITION, advance=False):
+3489                actions = self._parse_csv(
+3490                    lambda: self._parse_drop_partition(exists=partition_exists)
+3491                )
+3492            else:
+3493                self._retreat(index)
+3494                actions = self._parse_csv(self._parse_drop_column)
+3495        elif self._match_text_seq("RENAME", "TO"):
+3496            actions = self.expression(exp.RenameTable, this=self._parse_table(schema=True))
+3497        elif self._match_text_seq("ALTER"):
+3498            self._match(TokenType.COLUMN)
+3499            column = self._parse_field(any_token=True)
+3500
+3501            if self._match_pair(TokenType.DROP, TokenType.DEFAULT):
+3502                actions = self.expression(exp.AlterColumn, this=column, drop=True)
+3503            elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
+3504                actions = self.expression(
+3505                    exp.AlterColumn, this=column, default=self._parse_conjunction()
+3506                )
+3507            else:
+3508                self._match_text_seq("SET", "DATA")
+3509                actions = self.expression(
+3510                    exp.AlterColumn,
+3511                    this=column,
+3512                    dtype=self._match_text_seq("TYPE") and self._parse_types(),
+3513                    collate=self._match(TokenType.COLLATE) and self._parse_term(),
+3514                    using=self._match(TokenType.USING) and self._parse_conjunction(),
+3515                )
+3516
+3517        actions = ensure_list(actions)
+3518        return self.expression(exp.AlterTable, this=this, exists=exists, actions=actions)
+3519
+3520    def _parse_show(self) -> t.Optional[exp.Expression]:
+3521        parser = self._find_parser(self.SHOW_PARSERS, self._show_trie)  # type: ignore
+3522        if parser:
+3523            return parser(self)
+3524        self._advance()
+3525        return self.expression(exp.Show, this=self._prev.text.upper())
+3526
+3527    def _default_parse_set_item(self) -> exp.Expression:
+3528        return self.expression(
+3529            exp.SetItem,
+3530            this=self._parse_statement(),
+3531        )
+3532
+3533    def _parse_set_item(self) -> t.Optional[exp.Expression]:
+3534        parser = self._find_parser(self.SET_PARSERS, self._set_trie)  # type: ignore
+3535        return parser(self) if parser else self._default_parse_set_item()
+3536
+3537    def _parse_merge(self) -> exp.Expression:
+3538        self._match(TokenType.INTO)
+3539        target = self._parse_table()
+3540
+3541        self._match(TokenType.USING)
+3542        using = self._parse_table()
+3543
+3544        self._match(TokenType.ON)
+3545        on = self._parse_conjunction()
+3546
+3547        whens = []
+3548        while self._match(TokenType.WHEN):
+3549            this = self._parse_conjunction()
+3550            self._match(TokenType.THEN)
+3551
+3552            if self._match(TokenType.INSERT):
+3553                _this = self._parse_star()
+3554                if _this:
+3555                    then = self.expression(exp.Insert, this=_this)
+3556                else:
+3557                    then = self.expression(
+3558                        exp.Insert,
+3559                        this=self._parse_value(),
+3560                        expression=self._match(TokenType.VALUES) and self._parse_value(),
+3561                    )
+3562            elif self._match(TokenType.UPDATE):
+3563                expressions = self._parse_star()
+3564                if expressions:
+3565                    then = self.expression(exp.Update, expressions=expressions)
+3566                else:
+3567                    then = self.expression(
+3568                        exp.Update,
+3569                        expressions=self._match(TokenType.SET)
+3570                        and self._parse_csv(self._parse_equality),
+3571                    )
+3572            elif self._match(TokenType.DELETE):
+3573                then = self.expression(exp.Var, this=self._prev.text)
+3574
+3575            whens.append(self.expression(exp.When, this=this, then=then))
+3576
+3577        return self.expression(
+3578            exp.Merge,
+3579            this=target,
+3580            using=using,
+3581            on=on,
+3582            expressions=whens,
+3583        )
+3584
+3585    def _parse_set(self) -> exp.Expression:
+3586        return self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item))
+3587
+3588    def _parse_as_command(self, start: Token) -> exp.Command:
+3589        while self._curr:
+3590            self._advance()
+3591        return exp.Command(this=self._find_sql(start, self._prev))
+3592
+3593    def _find_parser(
+3594        self, parsers: t.Dict[str, t.Callable], trie: t.Dict
+3595    ) -> t.Optional[t.Callable]:
+3596        index = self._index
+3597        this = []
+3598        while True:
+3599            # The current token might be multiple words
+3600            curr = self._curr.text.upper()
+3601            key = curr.split(" ")
+3602            this.append(curr)
+3603            self._advance()
+3604            result, trie = in_trie(trie, key)
+3605            if result == 0:
+3606                break
+3607            if result == 2:
+3608                subparser = parsers[" ".join(this)]
+3609                return subparser
+3610        self._retreat(index)
+3611        return None
+3612
+3613    def _match(self, token_type, advance=True):
+3614        if not self._curr:
+3615            return None
+3616
+3617        if self._curr.token_type == token_type:
+3618            if advance:
+3619                self._advance()
+3620            return True
+3621
+3622        return None
+3623
+3624    def _match_set(self, types):
+3625        if not self._curr:
+3626            return None
+3627
+3628        if self._curr.token_type in types:
+3629            self._advance()
+3630            return True
+3631
+3632        return None
+3633
+3634    def _match_pair(self, token_type_a, token_type_b, advance=True):
+3635        if not self._curr or not self._next:
+3636            return None
+3637
+3638        if self._curr.token_type == token_type_a and self._next.token_type == token_type_b:
+3639            if advance:
+3640                self._advance(2)
+3641            return True
+3642
+3643        return None
+3644
+3645    def _match_l_paren(self, expression=None):
+3646        if not self._match(TokenType.L_PAREN):
+3647            self.raise_error("Expecting (")
+3648        if expression and self._prev_comments:
+3649            expression.comments = self._prev_comments
+3650
+3651    def _match_r_paren(self, expression=None):
+3652        if not self._match(TokenType.R_PAREN):
+3653            self.raise_error("Expecting )")
+3654        if expression and self._prev_comments:
+3655            expression.comments = self._prev_comments
+3656
+3657    def _match_texts(self, texts):
+3658        if self._curr and self._curr.text.upper() in texts:
+3659            self._advance()
+3660            return True
+3661        return False
+3662
+3663    def _match_text_seq(self, *texts, advance=True):
+3664        index = self._index
+3665        for text in texts:
+3666            if self._curr and self._curr.text.upper() == text:
+3667                self._advance()
+3668            else:
+3669                self._retreat(index)
+3670                return False
+3671
+3672        if not advance:
+3673            self._retreat(index)
+3674
+3675        return True
+3676
+3677    def _replace_columns_with_dots(self, this):
+3678        if isinstance(this, exp.Dot):
+3679            exp.replace_children(this, self._replace_columns_with_dots)
+3680        elif isinstance(this, exp.Column):
+3681            exp.replace_children(this, self._replace_columns_with_dots)
+3682            table = this.args.get("table")
+3683            this = (
+3684                self.expression(exp.Dot, this=table, expression=this.this)
+3685                if table
+3686                else self.expression(exp.Var, this=this.name)
+3687            )
+3688        elif isinstance(this, exp.Identifier):
+3689            this = self.expression(exp.Var, this=this.name)
+3690        return this
+3691
+3692    def _replace_lambda(self, node, lambda_variables):
+3693        if isinstance(node, exp.Column):
+3694            if node.name in lambda_variables:
+3695                return node.this
+3696        return node
+
+ + +
+
+ +
+ + def + parse_var_map(args): + + + +
+ +
22def parse_var_map(args):
+23    keys = []
+24    values = []
+25    for i in range(0, len(args), 2):
+26        keys.append(args[i])
+27        values.append(args[i + 1])
+28    return exp.VarMap(
+29        keys=exp.Array(expressions=keys),
+30        values=exp.Array(expressions=values),
+31    )
+
+ + + + +
+
+ +
+ + class + Parser: + + + +
+ +
  42class Parser(metaclass=_Parser):
+  43    """
+  44    Parser consumes a list of tokens produced by the `sqlglot.tokens.Tokenizer` and produces
+  45    a parsed syntax tree.
+  46
+  47    Args:
+  48        error_level: the desired error level.
+  49            Default: ErrorLevel.RAISE
+  50        error_message_context: determines the amount of context to capture from a
+  51            query string when displaying the error message (in number of characters).
+  52            Default: 50.
+  53        index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list.
+  54            Default: 0
+  55        alias_post_tablesample: If the table alias comes after tablesample.
+  56            Default: False
+  57        max_errors: Maximum number of error messages to include in a raised ParseError.
+  58            This is only relevant if error_level is ErrorLevel.RAISE.
+  59            Default: 3
+  60        null_ordering: Indicates the default null ordering method to use if not explicitly set.
+  61            Options are "nulls_are_small", "nulls_are_large", "nulls_are_last".
+  62            Default: "nulls_are_small"
+  63    """
+  64
+  65    FUNCTIONS: t.Dict[str, t.Callable] = {
+  66        **{name: f.from_arg_list for f in exp.ALL_FUNCTIONS for name in f.sql_names()},
+  67        "DATE_TO_DATE_STR": lambda args: exp.Cast(
+  68            this=seq_get(args, 0),
+  69            to=exp.DataType(this=exp.DataType.Type.TEXT),
+  70        ),
+  71        "TIME_TO_TIME_STR": lambda args: exp.Cast(
+  72            this=seq_get(args, 0),
+  73            to=exp.DataType(this=exp.DataType.Type.TEXT),
+  74        ),
+  75        "TS_OR_DS_TO_DATE_STR": lambda args: exp.Substring(
+  76            this=exp.Cast(
+  77                this=seq_get(args, 0),
+  78                to=exp.DataType(this=exp.DataType.Type.TEXT),
+  79            ),
+  80            start=exp.Literal.number(1),
+  81            length=exp.Literal.number(10),
+  82        ),
+  83        "VAR_MAP": parse_var_map,
+  84        "IFNULL": exp.Coalesce.from_arg_list,
+  85    }
+  86
+  87    NO_PAREN_FUNCTIONS = {
+  88        TokenType.CURRENT_DATE: exp.CurrentDate,
+  89        TokenType.CURRENT_DATETIME: exp.CurrentDate,
+  90        TokenType.CURRENT_TIMESTAMP: exp.CurrentTimestamp,
+  91    }
+  92
+  93    NESTED_TYPE_TOKENS = {
+  94        TokenType.ARRAY,
+  95        TokenType.MAP,
+  96        TokenType.STRUCT,
+  97        TokenType.NULLABLE,
+  98    }
+  99
+ 100    TYPE_TOKENS = {
+ 101        TokenType.BOOLEAN,
+ 102        TokenType.TINYINT,
+ 103        TokenType.SMALLINT,
+ 104        TokenType.INT,
+ 105        TokenType.BIGINT,
+ 106        TokenType.FLOAT,
+ 107        TokenType.DOUBLE,
+ 108        TokenType.CHAR,
+ 109        TokenType.NCHAR,
+ 110        TokenType.VARCHAR,
+ 111        TokenType.NVARCHAR,
+ 112        TokenType.TEXT,
+ 113        TokenType.MEDIUMTEXT,
+ 114        TokenType.LONGTEXT,
+ 115        TokenType.MEDIUMBLOB,
+ 116        TokenType.LONGBLOB,
+ 117        TokenType.BINARY,
+ 118        TokenType.VARBINARY,
+ 119        TokenType.JSON,
+ 120        TokenType.JSONB,
+ 121        TokenType.INTERVAL,
+ 122        TokenType.TIME,
+ 123        TokenType.TIMESTAMP,
+ 124        TokenType.TIMESTAMPTZ,
+ 125        TokenType.TIMESTAMPLTZ,
+ 126        TokenType.DATETIME,
+ 127        TokenType.DATE,
+ 128        TokenType.DECIMAL,
+ 129        TokenType.UUID,
+ 130        TokenType.GEOGRAPHY,
+ 131        TokenType.GEOMETRY,
+ 132        TokenType.HLLSKETCH,
+ 133        TokenType.HSTORE,
+ 134        TokenType.PSEUDO_TYPE,
+ 135        TokenType.SUPER,
+ 136        TokenType.SERIAL,
+ 137        TokenType.SMALLSERIAL,
+ 138        TokenType.BIGSERIAL,
+ 139        TokenType.XML,
+ 140        TokenType.UNIQUEIDENTIFIER,
+ 141        TokenType.MONEY,
+ 142        TokenType.SMALLMONEY,
+ 143        TokenType.ROWVERSION,
+ 144        TokenType.IMAGE,
+ 145        TokenType.VARIANT,
+ 146        TokenType.OBJECT,
+ 147        *NESTED_TYPE_TOKENS,
+ 148    }
+ 149
+ 150    SUBQUERY_PREDICATES = {
+ 151        TokenType.ANY: exp.Any,
+ 152        TokenType.ALL: exp.All,
+ 153        TokenType.EXISTS: exp.Exists,
+ 154        TokenType.SOME: exp.Any,
+ 155    }
+ 156
+ 157    RESERVED_KEYWORDS = {*Tokenizer.SINGLE_TOKENS.values(), TokenType.SELECT}
+ 158
+ 159    ID_VAR_TOKENS = {
+ 160        TokenType.VAR,
+ 161        TokenType.ALWAYS,
+ 162        TokenType.ANTI,
+ 163        TokenType.APPLY,
+ 164        TokenType.AUTO_INCREMENT,
+ 165        TokenType.BEGIN,
+ 166        TokenType.BOTH,
+ 167        TokenType.BUCKET,
+ 168        TokenType.CACHE,
+ 169        TokenType.CASCADE,
+ 170        TokenType.COLLATE,
+ 171        TokenType.COLUMN,
+ 172        TokenType.COMMAND,
+ 173        TokenType.COMMIT,
+ 174        TokenType.COMPOUND,
+ 175        TokenType.CONSTRAINT,
+ 176        TokenType.CURRENT_TIME,
+ 177        TokenType.DEFAULT,
+ 178        TokenType.DELETE,
+ 179        TokenType.DESCRIBE,
+ 180        TokenType.DIV,
+ 181        TokenType.END,
+ 182        TokenType.EXECUTE,
+ 183        TokenType.ESCAPE,
+ 184        TokenType.FALSE,
+ 185        TokenType.FIRST,
+ 186        TokenType.FILTER,
+ 187        TokenType.FOLLOWING,
+ 188        TokenType.FORMAT,
+ 189        TokenType.FUNCTION,
+ 190        TokenType.GENERATED,
+ 191        TokenType.IDENTITY,
+ 192        TokenType.IF,
+ 193        TokenType.INDEX,
+ 194        TokenType.ISNULL,
+ 195        TokenType.INTERVAL,
+ 196        TokenType.LAZY,
+ 197        TokenType.LEADING,
+ 198        TokenType.LEFT,
+ 199        TokenType.LOCAL,
+ 200        TokenType.MATERIALIZED,
+ 201        TokenType.MERGE,
+ 202        TokenType.NATURAL,
+ 203        TokenType.NEXT,
+ 204        TokenType.OFFSET,
+ 205        TokenType.ONLY,
+ 206        TokenType.OPTIONS,
+ 207        TokenType.ORDINALITY,
+ 208        TokenType.PERCENT,
+ 209        TokenType.PIVOT,
+ 210        TokenType.PRECEDING,
+ 211        TokenType.RANGE,
+ 212        TokenType.REFERENCES,
+ 213        TokenType.RIGHT,
+ 214        TokenType.ROW,
+ 215        TokenType.ROWS,
+ 216        TokenType.SCHEMA,
+ 217        TokenType.SCHEMA_COMMENT,
+ 218        TokenType.SEED,
+ 219        TokenType.SEMI,
+ 220        TokenType.SET,
+ 221        TokenType.SHOW,
+ 222        TokenType.SORTKEY,
+ 223        TokenType.TABLE,
+ 224        TokenType.TEMPORARY,
+ 225        TokenType.TOP,
+ 226        TokenType.TRAILING,
+ 227        TokenType.TRUE,
+ 228        TokenType.UNBOUNDED,
+ 229        TokenType.UNIQUE,
+ 230        TokenType.UNLOGGED,
+ 231        TokenType.UNPIVOT,
+ 232        TokenType.PROCEDURE,
+ 233        TokenType.VIEW,
+ 234        TokenType.VOLATILE,
+ 235        TokenType.WINDOW,
+ 236        *SUBQUERY_PREDICATES,
+ 237        *TYPE_TOKENS,
+ 238        *NO_PAREN_FUNCTIONS,
+ 239    }
+ 240
+ 241    TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - {
+ 242        TokenType.APPLY,
+ 243        TokenType.LEFT,
+ 244        TokenType.NATURAL,
+ 245        TokenType.OFFSET,
+ 246        TokenType.RIGHT,
+ 247        TokenType.WINDOW,
+ 248    }
+ 249
+ 250    UPDATE_ALIAS_TOKENS = TABLE_ALIAS_TOKENS - {TokenType.SET}
+ 251
+ 252    TRIM_TYPES = {TokenType.LEADING, TokenType.TRAILING, TokenType.BOTH}
+ 253
+ 254    FUNC_TOKENS = {
+ 255        TokenType.COMMAND,
+ 256        TokenType.CURRENT_DATE,
+ 257        TokenType.CURRENT_DATETIME,
+ 258        TokenType.CURRENT_TIMESTAMP,
+ 259        TokenType.CURRENT_TIME,
+ 260        TokenType.FILTER,
+ 261        TokenType.FIRST,
+ 262        TokenType.FORMAT,
+ 263        TokenType.IDENTIFIER,
+ 264        TokenType.INDEX,
+ 265        TokenType.ISNULL,
+ 266        TokenType.ILIKE,
+ 267        TokenType.LIKE,
+ 268        TokenType.MERGE,
+ 269        TokenType.OFFSET,
+ 270        TokenType.PRIMARY_KEY,
+ 271        TokenType.REPLACE,
+ 272        TokenType.ROW,
+ 273        TokenType.UNNEST,
+ 274        TokenType.VAR,
+ 275        TokenType.LEFT,
+ 276        TokenType.RIGHT,
+ 277        TokenType.DATE,
+ 278        TokenType.DATETIME,
+ 279        TokenType.TABLE,
+ 280        TokenType.TIMESTAMP,
+ 281        TokenType.TIMESTAMPTZ,
+ 282        TokenType.WINDOW,
+ 283        *TYPE_TOKENS,
+ 284        *SUBQUERY_PREDICATES,
+ 285    }
+ 286
+ 287    CONJUNCTION = {
+ 288        TokenType.AND: exp.And,
+ 289        TokenType.OR: exp.Or,
+ 290    }
+ 291
+ 292    EQUALITY = {
+ 293        TokenType.EQ: exp.EQ,
+ 294        TokenType.NEQ: exp.NEQ,
+ 295        TokenType.NULLSAFE_EQ: exp.NullSafeEQ,
+ 296    }
+ 297
+ 298    COMPARISON = {
+ 299        TokenType.GT: exp.GT,
+ 300        TokenType.GTE: exp.GTE,
+ 301        TokenType.LT: exp.LT,
+ 302        TokenType.LTE: exp.LTE,
+ 303    }
+ 304
+ 305    BITWISE = {
+ 306        TokenType.AMP: exp.BitwiseAnd,
+ 307        TokenType.CARET: exp.BitwiseXor,
+ 308        TokenType.PIPE: exp.BitwiseOr,
+ 309        TokenType.DPIPE: exp.DPipe,
+ 310    }
+ 311
+ 312    TERM = {
+ 313        TokenType.DASH: exp.Sub,
+ 314        TokenType.PLUS: exp.Add,
+ 315        TokenType.MOD: exp.Mod,
+ 316        TokenType.COLLATE: exp.Collate,
+ 317    }
+ 318
+ 319    FACTOR = {
+ 320        TokenType.DIV: exp.IntDiv,
+ 321        TokenType.LR_ARROW: exp.Distance,
+ 322        TokenType.SLASH: exp.Div,
+ 323        TokenType.STAR: exp.Mul,
+ 324    }
+ 325
+ 326    TIMESTAMPS = {
+ 327        TokenType.TIME,
+ 328        TokenType.TIMESTAMP,
+ 329        TokenType.TIMESTAMPTZ,
+ 330        TokenType.TIMESTAMPLTZ,
+ 331    }
+ 332
+ 333    SET_OPERATIONS = {
+ 334        TokenType.UNION,
+ 335        TokenType.INTERSECT,
+ 336        TokenType.EXCEPT,
+ 337    }
+ 338
+ 339    JOIN_SIDES = {
+ 340        TokenType.LEFT,
+ 341        TokenType.RIGHT,
+ 342        TokenType.FULL,
+ 343    }
+ 344
+ 345    JOIN_KINDS = {
+ 346        TokenType.INNER,
+ 347        TokenType.OUTER,
+ 348        TokenType.CROSS,
+ 349        TokenType.SEMI,
+ 350        TokenType.ANTI,
+ 351    }
+ 352
+ 353    LAMBDAS = {
+ 354        TokenType.ARROW: lambda self, expressions: self.expression(
+ 355            exp.Lambda,
+ 356            this=self._parse_conjunction().transform(
+ 357                self._replace_lambda, {node.name for node in expressions}
+ 358            ),
+ 359            expressions=expressions,
+ 360        ),
+ 361        TokenType.FARROW: lambda self, expressions: self.expression(
+ 362            exp.Kwarg,
+ 363            this=exp.Var(this=expressions[0].name),
+ 364            expression=self._parse_conjunction(),
+ 365        ),
+ 366    }
+ 367
+ 368    COLUMN_OPERATORS = {
+ 369        TokenType.DOT: None,
+ 370        TokenType.DCOLON: lambda self, this, to: self.expression(
+ 371            exp.Cast,
+ 372            this=this,
+ 373            to=to,
+ 374        ),
+ 375        TokenType.ARROW: lambda self, this, path: self.expression(
+ 376            exp.JSONExtract,
+ 377            this=this,
+ 378            expression=path,
+ 379        ),
+ 380        TokenType.DARROW: lambda self, this, path: self.expression(
+ 381            exp.JSONExtractScalar,
+ 382            this=this,
+ 383            expression=path,
+ 384        ),
+ 385        TokenType.HASH_ARROW: lambda self, this, path: self.expression(
+ 386            exp.JSONBExtract,
+ 387            this=this,
+ 388            expression=path,
+ 389        ),
+ 390        TokenType.DHASH_ARROW: lambda self, this, path: self.expression(
+ 391            exp.JSONBExtractScalar,
+ 392            this=this,
+ 393            expression=path,
+ 394        ),
+ 395        TokenType.PLACEHOLDER: lambda self, this, key: self.expression(
+ 396            exp.JSONBContains,
+ 397            this=this,
+ 398            expression=key,
+ 399        ),
+ 400    }
+ 401
+ 402    EXPRESSION_PARSERS = {
+ 403        exp.Column: lambda self: self._parse_column(),
+ 404        exp.DataType: lambda self: self._parse_types(),
+ 405        exp.From: lambda self: self._parse_from(),
+ 406        exp.Group: lambda self: self._parse_group(),
+ 407        exp.Identifier: lambda self: self._parse_id_var(),
+ 408        exp.Lateral: lambda self: self._parse_lateral(),
+ 409        exp.Join: lambda self: self._parse_join(),
+ 410        exp.Order: lambda self: self._parse_order(),
+ 411        exp.Cluster: lambda self: self._parse_sort(TokenType.CLUSTER_BY, exp.Cluster),
+ 412        exp.Sort: lambda self: self._parse_sort(TokenType.SORT_BY, exp.Sort),
+ 413        exp.Lambda: lambda self: self._parse_lambda(),
+ 414        exp.Limit: lambda self: self._parse_limit(),
+ 415        exp.Offset: lambda self: self._parse_offset(),
+ 416        exp.TableAlias: lambda self: self._parse_table_alias(),
+ 417        exp.Table: lambda self: self._parse_table(),
+ 418        exp.Condition: lambda self: self._parse_conjunction(),
+ 419        exp.Expression: lambda self: self._parse_statement(),
+ 420        exp.Properties: lambda self: self._parse_properties(),
+ 421        exp.Where: lambda self: self._parse_where(),
+ 422        exp.Ordered: lambda self: self._parse_ordered(),
+ 423        exp.Having: lambda self: self._parse_having(),
+ 424        exp.With: lambda self: self._parse_with(),
+ 425        exp.Window: lambda self: self._parse_named_window(),
+ 426        "JOIN_TYPE": lambda self: self._parse_join_side_and_kind(),
+ 427    }
+ 428
+ 429    STATEMENT_PARSERS = {
+ 430        TokenType.ALTER: lambda self: self._parse_alter(),
+ 431        TokenType.BEGIN: lambda self: self._parse_transaction(),
+ 432        TokenType.CACHE: lambda self: self._parse_cache(),
+ 433        TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
+ 434        TokenType.CREATE: lambda self: self._parse_create(),
+ 435        TokenType.DELETE: lambda self: self._parse_delete(),
+ 436        TokenType.DESC: lambda self: self._parse_describe(),
+ 437        TokenType.DESCRIBE: lambda self: self._parse_describe(),
+ 438        TokenType.DROP: lambda self: self._parse_drop(),
+ 439        TokenType.END: lambda self: self._parse_commit_or_rollback(),
+ 440        TokenType.INSERT: lambda self: self._parse_insert(),
+ 441        TokenType.LOAD_DATA: lambda self: self._parse_load_data(),
+ 442        TokenType.MERGE: lambda self: self._parse_merge(),
+ 443        TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
+ 444        TokenType.UNCACHE: lambda self: self._parse_uncache(),
+ 445        TokenType.UPDATE: lambda self: self._parse_update(),
+ 446        TokenType.USE: lambda self: self.expression(
+ 447            exp.Use,
+ 448            kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
+ 449            and exp.Var(this=self._prev.text),
+ 450            this=self._parse_table(schema=False),
+ 451        ),
+ 452    }
+ 453
+ 454    UNARY_PARSERS = {
+ 455        TokenType.PLUS: lambda self: self._parse_unary(),  # Unary + is handled as a no-op
+ 456        TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
+ 457        TokenType.TILDA: lambda self: self.expression(exp.BitwiseNot, this=self._parse_unary()),
+ 458        TokenType.DASH: lambda self: self.expression(exp.Neg, this=self._parse_unary()),
+ 459    }
+ 460
+ 461    PRIMARY_PARSERS = {
+ 462        TokenType.STRING: lambda self, token: self.expression(
+ 463            exp.Literal, this=token.text, is_string=True
+ 464        ),
+ 465        TokenType.NUMBER: lambda self, token: self.expression(
+ 466            exp.Literal, this=token.text, is_string=False
+ 467        ),
+ 468        TokenType.STAR: lambda self, _: self.expression(
+ 469            exp.Star,
+ 470            **{"except": self._parse_except(), "replace": self._parse_replace()},
+ 471        ),
+ 472        TokenType.NULL: lambda self, _: self.expression(exp.Null),
+ 473        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
+ 474        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
+ 475        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
+ 476        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
+ 477        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
+ 478        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
+ 479        TokenType.NATIONAL: lambda self, token: self._parse_national(token),
+ 480        TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
+ 481    }
+ 482
+ 483    PLACEHOLDER_PARSERS = {
+ 484        TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
+ 485        TokenType.PARAMETER: lambda self: self.expression(
+ 486            exp.Parameter, this=self._parse_var() or self._parse_primary()
+ 487        ),
+ 488        TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
+ 489        if self._match_set((TokenType.NUMBER, TokenType.VAR))
+ 490        else None,
+ 491    }
+ 492
+ 493    RANGE_PARSERS = {
+ 494        TokenType.BETWEEN: lambda self, this: self._parse_between(this),
+ 495        TokenType.GLOB: lambda self, this: self._parse_escape(
+ 496            self.expression(exp.Glob, this=this, expression=self._parse_bitwise())
+ 497        ),
+ 498        TokenType.IN: lambda self, this: self._parse_in(this),
+ 499        TokenType.IS: lambda self, this: self._parse_is(this),
+ 500        TokenType.LIKE: lambda self, this: self._parse_escape(
+ 501            self.expression(exp.Like, this=this, expression=self._parse_bitwise())
+ 502        ),
+ 503        TokenType.ILIKE: lambda self, this: self._parse_escape(
+ 504            self.expression(exp.ILike, this=this, expression=self._parse_bitwise())
+ 505        ),
+ 506        TokenType.IRLIKE: lambda self, this: self.expression(
+ 507            exp.RegexpILike, this=this, expression=self._parse_bitwise()
+ 508        ),
+ 509        TokenType.RLIKE: lambda self, this: self.expression(
+ 510            exp.RegexpLike, this=this, expression=self._parse_bitwise()
+ 511        ),
+ 512        TokenType.SIMILAR_TO: lambda self, this: self.expression(
+ 513            exp.SimilarTo, this=this, expression=self._parse_bitwise()
+ 514        ),
+ 515    }
+ 516
+ 517    PROPERTY_PARSERS = {
+ 518        "AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
+ 519        "CHARACTER SET": lambda self: self._parse_character_set(),
+ 520        "LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
+ 521        "PARTITION BY": lambda self: self._parse_partitioned_by(),
+ 522        "PARTITIONED BY": lambda self: self._parse_partitioned_by(),
+ 523        "PARTITIONED_BY": lambda self: self._parse_partitioned_by(),
+ 524        "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
+ 525        "STORED": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
+ 526        "DISTKEY": lambda self: self._parse_distkey(),
+ 527        "DISTSTYLE": lambda self: self._parse_property_assignment(exp.DistStyleProperty),
+ 528        "SORTKEY": lambda self: self._parse_sortkey(),
+ 529        "LIKE": lambda self: self._parse_create_like(),
+ 530        "RETURNS": lambda self: self._parse_returns(),
+ 531        "ROW": lambda self: self._parse_row(),
+ 532        "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty),
+ 533        "FORMAT": lambda self: self._parse_property_assignment(exp.FileFormatProperty),
+ 534        "TABLE_FORMAT": lambda self: self._parse_property_assignment(exp.TableFormatProperty),
+ 535        "USING": lambda self: self._parse_property_assignment(exp.TableFormatProperty),
+ 536        "LANGUAGE": lambda self: self._parse_property_assignment(exp.LanguageProperty),
+ 537        "EXECUTE": lambda self: self._parse_property_assignment(exp.ExecuteAsProperty),
+ 538        "DETERMINISTIC": lambda self: self.expression(
+ 539            exp.VolatilityProperty, this=exp.Literal.string("IMMUTABLE")
+ 540        ),
+ 541        "IMMUTABLE": lambda self: self.expression(
+ 542            exp.VolatilityProperty, this=exp.Literal.string("IMMUTABLE")
+ 543        ),
+ 544        "STABLE": lambda self: self.expression(
+ 545            exp.VolatilityProperty, this=exp.Literal.string("STABLE")
+ 546        ),
+ 547        "VOLATILE": lambda self: self.expression(
+ 548            exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
+ 549        ),
+ 550        "WITH": lambda self: self._parse_with_property(),
+ 551        "TBLPROPERTIES": lambda self: self._parse_wrapped_csv(self._parse_property),
+ 552        "FALLBACK": lambda self: self._parse_fallback(no=self._prev.text.upper() == "NO"),
+ 553        "LOG": lambda self: self._parse_log(no=self._prev.text.upper() == "NO"),
+ 554        "BEFORE": lambda self: self._parse_journal(
+ 555            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
+ 556        ),
+ 557        "JOURNAL": lambda self: self._parse_journal(
+ 558            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
+ 559        ),
+ 560        "AFTER": lambda self: self._parse_afterjournal(
+ 561            no=self._prev.text.upper() == "NO", dual=self._prev.text.upper() == "DUAL"
+ 562        ),
+ 563        "LOCAL": lambda self: self._parse_afterjournal(no=False, dual=False, local=True),
+ 564        "NOT": lambda self: self._parse_afterjournal(no=False, dual=False, local=False),
+ 565        "CHECKSUM": lambda self: self._parse_checksum(),
+ 566        "FREESPACE": lambda self: self._parse_freespace(),
+ 567        "MERGEBLOCKRATIO": lambda self: self._parse_mergeblockratio(
+ 568            no=self._prev.text.upper() == "NO", default=self._prev.text.upper() == "DEFAULT"
+ 569        ),
+ 570        "MIN": lambda self: self._parse_datablocksize(),
+ 571        "MINIMUM": lambda self: self._parse_datablocksize(),
+ 572        "MAX": lambda self: self._parse_datablocksize(),
+ 573        "MAXIMUM": lambda self: self._parse_datablocksize(),
+ 574        "DATABLOCKSIZE": lambda self: self._parse_datablocksize(
+ 575            default=self._prev.text.upper() == "DEFAULT"
+ 576        ),
+ 577        "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
+ 578        "ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
+ 579        "DEFINER": lambda self: self._parse_definer(),
+ 580    }
+ 581
    # Maps a constraint-introducing token to the method that parses the
    # constraint following it inside a column/table definition.
    CONSTRAINT_PARSERS = {
        TokenType.CHECK: lambda self: self.expression(
            exp.Check, this=self._parse_wrapped(self._parse_conjunction)
        ),
        TokenType.FOREIGN_KEY: lambda self: self._parse_foreign_key(),
        TokenType.UNIQUE: lambda self: self._parse_unique(),
        TokenType.LIKE: lambda self: self._parse_create_like(),
    }
+ 590
    # Function-like constructs that are written without parentheses.
    NO_PAREN_FUNCTION_PARSERS = {
        TokenType.CASE: lambda self: self._parse_case(),
        TokenType.IF: lambda self: self._parse_if(),
    }
+ 595
    # Functions whose argument lists need bespoke parsing (keyed by upper-cased
    # function name) rather than the generic comma-separated argument parser.
    FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
        "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
        "TRY_CONVERT": lambda self: self._parse_convert(False),
        "EXTRACT": lambda self: self._parse_extract(),
        "POSITION": lambda self: self._parse_position(),
        "SUBSTRING": lambda self: self._parse_substring(),
        "TRIM": lambda self: self._parse_trim(),
        "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
        "TRY_CAST": lambda self: self._parse_cast(False),
        "STRING_AGG": lambda self: self._parse_string_agg(),
    }
+ 607
    # Parsers for the trailing clauses of a query; the keys are the arg names
    # under which each parsed clause is stored on the query expression.
    QUERY_MODIFIER_PARSERS = {
        "match": lambda self: self._parse_match_recognize(),
        "where": lambda self: self._parse_where(),
        "group": lambda self: self._parse_group(),
        "having": lambda self: self._parse_having(),
        "qualify": lambda self: self._parse_qualify(),
        "windows": lambda self: self._parse_window_clause(),
        "distribute": lambda self: self._parse_sort(TokenType.DISTRIBUTE_BY, exp.Distribute),
        "sort": lambda self: self._parse_sort(TokenType.SORT_BY, exp.Sort),
        "cluster": lambda self: self._parse_sort(TokenType.CLUSTER_BY, exp.Cluster),
        "order": lambda self: self._parse_order(),
        "limit": lambda self: self._parse_limit(),
        "offset": lambda self: self._parse_offset(),
        "lock": lambda self: self._parse_lock(),
    }
+ 623
    # SHOW / SET statement parsers; empty here, populated by dialect subclasses.
    SHOW_PARSERS: t.Dict[str, t.Callable] = {}
    SET_PARSERS: t.Dict[str, t.Callable] = {}

    # Expression types that may carry trailing query modifiers (WHERE, LIMIT, ...).
    MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table)

    # Object kinds that can follow CREATE / DROP / DESCRIBE.
    CREATABLES = {
        TokenType.COLUMN,
        TokenType.FUNCTION,
        TokenType.INDEX,
        TokenType.PROCEDURE,
        TokenType.SCHEMA,
        TokenType.TABLE,
        TokenType.VIEW,
    }

    # Accepted transaction kind keywords (e.g. BEGIN IMMEDIATE).
    TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}

    # Identifiers usable as window aliases; ROWS is excluded because it starts
    # a window frame specification.
    WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}

    # Tokens that may introduce an ALTER TABLE ... ADD constraint.
    ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}

    # Whether CAST is parsed as a strict cast (see FUNCTION_PARSERS for usage).
    STRICT_CAST = True
+ 646
    # Restrict instances to exactly the attributes assigned in __init__,
    # reset() and _advance(); avoids a per-instance __dict__.
    __slots__ = (
        "error_level",
        "error_message_context",
        "sql",
        "errors",
        "index_offset",
        "unnest_column_only",
        "alias_post_tablesample",
        "max_errors",
        "null_ordering",
        "_tokens",
        "_index",
        "_curr",
        "_next",
        "_prev",
        "_prev_comments",
        "_show_trie",
        "_set_trie",
    )
+ 666
    def __init__(
        self,
        error_level: t.Optional[ErrorLevel] = None,
        error_message_context: int = 100,
        index_offset: int = 0,
        unnest_column_only: bool = False,
        alias_post_tablesample: bool = False,
        max_errors: int = 3,
        null_ordering: t.Optional[str] = None,
    ):
        """
        Create a parser.

        Args:
            error_level: how errors are surfaced; defaults to ErrorLevel.IMMEDIATE
                (raise_error raises on the spot in that mode).
            error_message_context: number of characters of surrounding SQL included
                in error messages (used by raise_error).
            index_offset: dialect setting stored on the instance; consumed by
                parsing helpers outside this chunk — TODO confirm usage.
            unnest_column_only: dialect setting stored on the instance.
            alias_post_tablesample: dialect setting stored on the instance.
            max_errors: maximum number of error messages concatenated when
                check_errors raises.
            null_ordering: dialect setting stored on the instance.
        """
        self.error_level = error_level or ErrorLevel.IMMEDIATE
        self.error_message_context = error_message_context
        self.index_offset = index_offset
        self.unnest_column_only = unnest_column_only
        self.alias_post_tablesample = alias_post_tablesample
        self.max_errors = max_errors
        self.null_ordering = null_ordering
        # reset() initializes all mutable parsing state; must run after config is set.
        self.reset()
+ 685
+ 686    def reset(self):
+ 687        self.sql = ""
+ 688        self.errors = []
+ 689        self._tokens = []
+ 690        self._index = 0
+ 691        self._curr = None
+ 692        self._next = None
+ 693        self._prev = None
+ 694        self._prev_comments = None
+ 695
+ 696    def parse(
+ 697        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
+ 698    ) -> t.List[t.Optional[exp.Expression]]:
+ 699        """
+ 700        Parses a list of tokens and returns a list of syntax trees, one tree
+ 701        per parsed SQL statement.
+ 702
+ 703        Args:
+ 704            raw_tokens: the list of tokens.
+ 705            sql: the original SQL string, used to produce helpful debug messages.
+ 706
+ 707        Returns:
+ 708            The list of syntax trees.
+ 709        """
+ 710        return self._parse(
+ 711            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
+ 712        )
+ 713
+ 714    def parse_into(
+ 715        self,
+ 716        expression_types: exp.IntoType,
+ 717        raw_tokens: t.List[Token],
+ 718        sql: t.Optional[str] = None,
+ 719    ) -> t.List[t.Optional[exp.Expression]]:
+ 720        """
+ 721        Parses a list of tokens into a given Expression type. If a collection of Expression
+ 722        types is given instead, this method will try to parse the token list into each one
+ 723        of them, stopping at the first for which the parsing succeeds.
+ 724
+ 725        Args:
+ 726            expression_types: the expression type(s) to try and parse the token list into.
+ 727            raw_tokens: the list of tokens.
+ 728            sql: the original SQL string, used to produce helpful debug messages.
+ 729
+ 730        Returns:
+ 731            The target Expression.
+ 732        """
+ 733        errors = []
+ 734        for expression_type in ensure_collection(expression_types):
+ 735            parser = self.EXPRESSION_PARSERS.get(expression_type)
+ 736            if not parser:
+ 737                raise TypeError(f"No parser registered for {expression_type}")
+ 738            try:
+ 739                return self._parse(parser, raw_tokens, sql)
+ 740            except ParseError as e:
+ 741                e.errors[0]["into_expression"] = expression_type
+ 742                errors.append(e)
+ 743        raise ParseError(
+ 744            f"Failed to parse into {expression_types}",
+ 745            errors=merge_errors(errors),
+ 746        ) from errors[-1]
+ 747
+ 748    def _parse(
+ 749        self,
+ 750        parse_method: t.Callable[[Parser], t.Optional[exp.Expression]],
+ 751        raw_tokens: t.List[Token],
+ 752        sql: t.Optional[str] = None,
+ 753    ) -> t.List[t.Optional[exp.Expression]]:
+ 754        self.reset()
+ 755        self.sql = sql or ""
+ 756        total = len(raw_tokens)
+ 757        chunks: t.List[t.List[Token]] = [[]]
+ 758
+ 759        for i, token in enumerate(raw_tokens):
+ 760            if token.token_type == TokenType.SEMICOLON:
+ 761                if i < total - 1:
+ 762                    chunks.append([])
+ 763            else:
+ 764                chunks[-1].append(token)
+ 765
+ 766        expressions = []
+ 767
+ 768        for tokens in chunks:
+ 769            self._index = -1
+ 770            self._tokens = tokens
+ 771            self._advance()
+ 772
+ 773            expressions.append(parse_method(self))
+ 774
+ 775            if self._index < len(self._tokens):
+ 776                self.raise_error("Invalid expression / Unexpected token")
+ 777
+ 778            self.check_errors()
+ 779
+ 780        return expressions
+ 781
+ 782    def check_errors(self) -> None:
+ 783        """
+ 784        Logs or raises any found errors, depending on the chosen error level setting.
+ 785        """
+ 786        if self.error_level == ErrorLevel.WARN:
+ 787            for error in self.errors:
+ 788                logger.error(str(error))
+ 789        elif self.error_level == ErrorLevel.RAISE and self.errors:
+ 790            raise ParseError(
+ 791                concat_messages(self.errors, self.max_errors),
+ 792                errors=merge_errors(self.errors),
+ 793            )
+ 794
+ 795    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
+ 796        """
+ 797        Appends an error in the list of recorded errors or raises it, depending on the chosen
+ 798        error level setting.
+ 799        """
+ 800        token = token or self._curr or self._prev or Token.string("")
+ 801        start = self._find_token(token)
+ 802        end = start + len(token.text)
+ 803        start_context = self.sql[max(start - self.error_message_context, 0) : start]
+ 804        highlight = self.sql[start:end]
+ 805        end_context = self.sql[end : end + self.error_message_context]
+ 806
+ 807        error = ParseError.new(
+ 808            f"{message}. Line {token.line}, Col: {token.col}.\n"
+ 809            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
+ 810            description=message,
+ 811            line=token.line,
+ 812            col=token.col,
+ 813            start_context=start_context,
+ 814            highlight=highlight,
+ 815            end_context=end_context,
+ 816        )
+ 817
+ 818        if self.error_level == ErrorLevel.IMMEDIATE:
+ 819            raise error
+ 820
+ 821        self.errors.append(error)
+ 822
+ 823    def expression(
+ 824        self, exp_class: t.Type[exp.Expression], comments: t.Optional[t.List[str]] = None, **kwargs
+ 825    ) -> exp.Expression:
+ 826        """
+ 827        Creates a new, validated Expression.
+ 828
+ 829        Args:
+ 830            exp_class: the expression class to instantiate.
+ 831            comments: an optional list of comments to attach to the expression.
+ 832            kwargs: the arguments to set for the expression along with their respective values.
+ 833
+ 834        Returns:
+ 835            The target expression.
+ 836        """
+ 837        instance = exp_class(**kwargs)
+ 838        if self._prev_comments:
+ 839            instance.comments = self._prev_comments
+ 840            self._prev_comments = None
+ 841        if comments:
+ 842            instance.comments = comments
+ 843        self.validate_expression(instance)
+ 844        return instance
+ 845
+ 846    def validate_expression(
+ 847        self, expression: exp.Expression, args: t.Optional[t.List] = None
+ 848    ) -> None:
+ 849        """
+ 850        Validates an already instantiated expression, making sure that all its mandatory arguments
+ 851        are set.
+ 852
+ 853        Args:
+ 854            expression: the expression to validate.
+ 855            args: an optional list of items that was used to instantiate the expression, if it's a Func.
+ 856        """
+ 857        if self.error_level == ErrorLevel.IGNORE:
+ 858            return
+ 859
+ 860        for error_message in expression.error_messages(args):
+ 861            self.raise_error(error_message)
+ 862
+ 863    def _find_sql(self, start: Token, end: Token) -> str:
+ 864        return self.sql[self._find_token(start) : self._find_token(end) + len(end.text)]
+ 865
+ 866    def _find_token(self, token: Token) -> int:
+ 867        line = 1
+ 868        col = 1
+ 869        index = 0
+ 870
+ 871        while line < token.line or col < token.col:
+ 872            if Tokenizer.WHITE_SPACE.get(self.sql[index]) == TokenType.BREAK:
+ 873                line += 1
+ 874                col = 1
+ 875            else:
+ 876                col += 1
+ 877            index += 1
+ 878
+ 879        return index
+ 880
+ 881    def _advance(self, times: int = 1) -> None:
+ 882        self._index += times
+ 883        self._curr = seq_get(self._tokens, self._index)
+ 884        self._next = seq_get(self._tokens, self._index + 1)
+ 885        if self._index > 0:
+ 886            self._prev = self._tokens[self._index - 1]
+ 887            self._prev_comments = self._prev.comments
+ 888        else:
+ 889            self._prev = None
+ 890            self._prev_comments = None
+ 891
+ 892    def _retreat(self, index: int) -> None:
+ 893        self._advance(index - self._index)
+ 894
+ 895    def _parse_command(self) -> exp.Expression:
+ 896        return self.expression(exp.Command, this=self._prev.text, expression=self._parse_string())
+ 897
+ 898    def _parse_statement(self) -> t.Optional[exp.Expression]:
+ 899        if self._curr is None:
+ 900            return None
+ 901
+ 902        if self._match_set(self.STATEMENT_PARSERS):
+ 903            return self.STATEMENT_PARSERS[self._prev.token_type](self)
+ 904
+ 905        if self._match_set(Tokenizer.COMMANDS):
+ 906            return self._parse_command()
+ 907
+ 908        expression = self._parse_expression()
+ 909        expression = self._parse_set_operations(expression) if expression else self._parse_select()
+ 910
+ 911        self._parse_query_modifiers(expression)
+ 912        return expression
+ 913
+ 914    def _parse_drop(self, default_kind: t.Optional[str] = None) -> t.Optional[exp.Expression]:
+ 915        start = self._prev
+ 916        temporary = self._match(TokenType.TEMPORARY)
+ 917        materialized = self._match(TokenType.MATERIALIZED)
+ 918        kind = self._match_set(self.CREATABLES) and self._prev.text
+ 919        if not kind:
+ 920            if default_kind:
+ 921                kind = default_kind
+ 922            else:
+ 923                return self._parse_as_command(start)
+ 924
+ 925        return self.expression(
+ 926            exp.Drop,
+ 927            exists=self._parse_exists(),
+ 928            this=self._parse_table(schema=True),
+ 929            kind=kind,
+ 930            temporary=temporary,
+ 931            materialized=materialized,
+ 932            cascade=self._match(TokenType.CASCADE),
+ 933        )
+ 934
+ 935    def _parse_exists(self, not_: bool = False) -> t.Optional[bool]:
+ 936        return (
+ 937            self._match(TokenType.IF)
+ 938            and (not not_ or self._match(TokenType.NOT))
+ 939            and self._match(TokenType.EXISTS)
+ 940        )
+ 941
    def _parse_create(self) -> t.Optional[exp.Expression]:
        """
        Parse a CREATE statement (table, view, schema, index, function or procedure),
        falling back to a raw command when the created object kind is unrecognized.
        """
        start = self._prev
        replace = self._match_pair(TokenType.OR, TokenType.REPLACE)
        # The next group of flags must be matched in this exact order, since each
        # match consumes its keyword from the token stream.
        set_ = self._match(TokenType.SET)  # Teradata
        multiset = self._match_text_seq("MULTISET")  # Teradata
        global_temporary = self._match_text_seq("GLOBAL", "TEMPORARY")  # Teradata
        volatile = self._match(TokenType.VOLATILE)  # Teradata
        temporary = self._match(TokenType.TEMPORARY)
        transient = self._match_text_seq("TRANSIENT")
        external = self._match_text_seq("EXTERNAL")
        unique = self._match(TokenType.UNIQUE)
        materialized = self._match(TokenType.MATERIALIZED)

        # CREATE TABLE FUNCTION ...: drop the TABLE keyword and parse as a function.
        if self._match_pair(TokenType.TABLE, TokenType.FUNCTION, advance=False):
            self._match(TokenType.TABLE)

        properties = None
        create_token = self._match_set(self.CREATABLES) and self._prev

        if not create_token:
            # Properties may appear before the created-object keyword; if neither
            # is present the statement cannot be parsed structurally.
            properties = self._parse_properties()
            create_token = self._match_set(self.CREATABLES) and self._prev

            if not properties or not create_token:
                return self._parse_as_command(start)

        exists = self._parse_exists(not_=True)
        this = None
        expression = None
        data = None
        statistics = None
        no_primary_index = None
        indexes = None
        no_schema_binding = None
        begin = None

        if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE):
            this = self._parse_user_defined_function(kind=create_token.token_type)
            properties = self._parse_properties()

            self._match(TokenType.ALIAS)
            begin = self._match(TokenType.BEGIN)
            return_ = self._match_text_seq("RETURN")
            expression = self._parse_statement()

            # RETURN <expr> bodies are wrapped in an explicit Return node.
            if return_:
                expression = self.expression(exp.Return, this=expression)
        elif create_token.token_type == TokenType.INDEX:
            this = self._parse_index()
        elif create_token.token_type in (
            TokenType.TABLE,
            TokenType.VIEW,
            TokenType.SCHEMA,
        ):
            table_parts = self._parse_table_parts(schema=True)

            if self._match(TokenType.COMMA):  # comma-separated properties before schema definition
                properties = self._parse_properties(before=True)

            this = self._parse_schema(this=table_parts)

            if not properties:  # properties after schema definition
                properties = self._parse_properties()

            self._match(TokenType.ALIAS)
            expression = self._parse_ddl_select()

            if create_token.token_type == TokenType.TABLE:
                # WITH [NO] DATA / AND [NO] STATISTICS trailers: tri-state flags,
                # None when the clause is absent.
                if self._match_text_seq("WITH", "DATA"):
                    data = True
                elif self._match_text_seq("WITH", "NO", "DATA"):
                    data = False

                if self._match_text_seq("AND", "STATISTICS"):
                    statistics = True
                elif self._match_text_seq("AND", "NO", "STATISTICS"):
                    statistics = False

                no_primary_index = self._match_text_seq("NO", "PRIMARY", "INDEX")

                indexes = []
                while True:
                    index = self._parse_create_table_index()

                    # post index PARTITION BY property
                    if self._match(TokenType.PARTITION_BY, advance=False):
                        if properties:
                            properties.expressions.append(self._parse_property())
                        else:
                            properties = self._parse_properties()

                    if not index:
                        break
                    else:
                        indexes.append(index)
            elif create_token.token_type == TokenType.VIEW:
                if self._match_text_seq("WITH", "NO", "SCHEMA", "BINDING"):
                    no_schema_binding = True

        return self.expression(
            exp.Create,
            this=this,
            kind=create_token.text,
            expression=expression,
            set=set_,
            multiset=multiset,
            global_temporary=global_temporary,
            volatile=volatile,
            exists=exists,
            properties=properties,
            temporary=temporary,
            transient=transient,
            external=external,
            replace=replace,
            unique=unique,
            materialized=materialized,
            data=data,
            statistics=statistics,
            no_primary_index=no_primary_index,
            indexes=indexes,
            no_schema_binding=no_schema_binding,
            begin=begin,
        )
+1065
+1066    def _parse_property_before(self) -> t.Optional[exp.Expression]:
+1067        self._match(TokenType.COMMA)
+1068
+1069        # parsers look to _prev for no/dual/default, so need to consume first
+1070        self._match_text_seq("NO")
+1071        self._match_text_seq("DUAL")
+1072        self._match_text_seq("DEFAULT")
+1073
+1074        if self.PROPERTY_PARSERS.get(self._curr.text.upper()):
+1075            return self.PROPERTY_PARSERS[self._curr.text.upper()](self)
+1076
+1077        return None
+1078
+1079    def _parse_property(self) -> t.Optional[exp.Expression]:
+1080        if self._match_texts(self.PROPERTY_PARSERS):
+1081            return self.PROPERTY_PARSERS[self._prev.text.upper()](self)
+1082
+1083        if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET):
+1084            return self._parse_character_set(True)
+1085
+1086        if self._match_pair(TokenType.COMPOUND, TokenType.SORTKEY):
+1087            return self._parse_sortkey(compound=True)
+1088
+1089        if self._match_text_seq("SQL", "SECURITY"):
+1090            return self.expression(exp.SqlSecurityProperty, definer=self._match_text_seq("DEFINER"))
+1091
+1092        assignment = self._match_pair(
+1093            TokenType.VAR, TokenType.EQ, advance=False
+1094        ) or self._match_pair(TokenType.STRING, TokenType.EQ, advance=False)
+1095
+1096        if assignment:
+1097            key = self._parse_var_or_string()
+1098            self._match(TokenType.EQ)
+1099            return self.expression(exp.Property, this=key, value=self._parse_column())
+1100
+1101        return None
+1102
+1103    def _parse_property_assignment(self, exp_class: t.Type[exp.Expression]) -> exp.Expression:
+1104        self._match(TokenType.EQ)
+1105        self._match(TokenType.ALIAS)
+1106        return self.expression(
+1107            exp_class,
+1108            this=self._parse_var_or_string() or self._parse_number() or self._parse_id_var(),
+1109        )
+1110
+1111    def _parse_properties(self, before=None) -> t.Optional[exp.Expression]:
+1112        properties = []
+1113
+1114        while True:
+1115            if before:
+1116                identified_property = self._parse_property_before()
+1117            else:
+1118                identified_property = self._parse_property()
+1119
+1120            if not identified_property:
+1121                break
+1122            for p in ensure_collection(identified_property):
+1123                properties.append(p)
+1124
+1125        if properties:
+1126            return self.expression(exp.Properties, expressions=properties)
+1127
+1128        return None
+1129
+1130    def _parse_fallback(self, no=False) -> exp.Expression:
+1131        self._match_text_seq("FALLBACK")
+1132        return self.expression(
+1133            exp.FallbackProperty, no=no, protection=self._match_text_seq("PROTECTION")
+1134        )
+1135
+1136    def _parse_with_property(
+1137        self,
+1138    ) -> t.Union[t.Optional[exp.Expression], t.List[t.Optional[exp.Expression]]]:
+1139        if self._match(TokenType.L_PAREN, advance=False):
+1140            return self._parse_wrapped_csv(self._parse_property)
+1141
+1142        if not self._next:
+1143            return None
+1144
+1145        if self._next.text.upper() == "JOURNAL":
+1146            return self._parse_withjournaltable()
+1147
+1148        return self._parse_withisolatedloading()
+1149
+1150    # https://dev.mysql.com/doc/refman/8.0/en/create-view.html
+1151    def _parse_definer(self) -> t.Optional[exp.Expression]:
+1152        self._match(TokenType.EQ)
+1153
+1154        user = self._parse_id_var()
+1155        self._match(TokenType.PARAMETER)
+1156        host = self._parse_id_var() or (self._match(TokenType.MOD) and self._prev.text)
+1157
+1158        if not user or not host:
+1159            return None
+1160
+1161        return exp.DefinerProperty(this=f"{user}@{host}")
+1162
+1163    def _parse_withjournaltable(self) -> exp.Expression:
+1164        self._match_text_seq("WITH", "JOURNAL", "TABLE")
+1165        self._match(TokenType.EQ)
+1166        return self.expression(exp.WithJournalTableProperty, this=self._parse_table_parts())
+1167
    def _parse_log(self, no=False) -> exp.Expression:
        """Parse a [NO] LOG property; ``no`` is decided by the caller from _prev."""
        self._match_text_seq("LOG")
        return self.expression(exp.LogProperty, no=no)
+1171
+1172    def _parse_journal(self, no=False, dual=False) -> exp.Expression:
+1173        before = self._match_text_seq("BEFORE")
+1174        self._match_text_seq("JOURNAL")
+1175        return self.expression(exp.JournalProperty, no=no, dual=dual, before=before)
+1176
    def _parse_afterjournal(self, no=False, dual=False, local=None) -> exp.Expression:
        """
        Parse an AFTER JOURNAL property; ``no``/``dual``/``local`` are decided by the
        caller from the keyword that preceded it.
        """
        # The optional NOT/LOCAL keywords must be consumed in this order before
        # the AFTER JOURNAL sequence itself.
        self._match_text_seq("NOT")
        self._match_text_seq("LOCAL")
        self._match_text_seq("AFTER", "JOURNAL")
        return self.expression(exp.AfterJournalProperty, no=no, dual=dual, local=local)
+1182
+1183    def _parse_checksum(self) -> exp.Expression:
+1184        self._match_text_seq("CHECKSUM")
+1185        self._match(TokenType.EQ)
+1186
+1187        on = None
+1188        if self._match(TokenType.ON):
+1189            on = True
+1190        elif self._match_text_seq("OFF"):
+1191            on = False
+1192        default = self._match(TokenType.DEFAULT)
+1193
+1194        return self.expression(
+1195            exp.ChecksumProperty,
+1196            on=on,
+1197            default=default,
+1198        )
+1199
+1200    def _parse_freespace(self) -> exp.Expression:
+1201        self._match_text_seq("FREESPACE")
+1202        self._match(TokenType.EQ)
+1203        return self.expression(
+1204            exp.FreespaceProperty, this=self._parse_number(), percent=self._match(TokenType.PERCENT)
+1205        )
+1206
+1207    def _parse_mergeblockratio(self, no=False, default=False) -> exp.Expression:
+1208        self._match_text_seq("MERGEBLOCKRATIO")
+1209        if self._match(TokenType.EQ):
+1210            return self.expression(
+1211                exp.MergeBlockRatioProperty,
+1212                this=self._parse_number(),
+1213                percent=self._match(TokenType.PERCENT),
+1214            )
+1215        else:
+1216            return self.expression(
+1217                exp.MergeBlockRatioProperty,
+1218                no=no,
+1219                default=default,
+1220            )
+1221
+1222    def _parse_datablocksize(self, default=None) -> exp.Expression:
+1223        if default:
+1224            self._match_text_seq("DATABLOCKSIZE")
+1225            return self.expression(exp.DataBlocksizeProperty, default=True)
+1226        elif self._match_texts(("MIN", "MINIMUM")):
+1227            self._match_text_seq("DATABLOCKSIZE")
+1228            return self.expression(exp.DataBlocksizeProperty, min=True)
+1229        elif self._match_texts(("MAX", "MAXIMUM")):
+1230            self._match_text_seq("DATABLOCKSIZE")
+1231            return self.expression(exp.DataBlocksizeProperty, min=False)
+1232
+1233        self._match_text_seq("DATABLOCKSIZE")
+1234        self._match(TokenType.EQ)
+1235        size = self._parse_number()
+1236        units = None
+1237        if self._match_texts(("BYTES", "KBYTES", "KILOBYTES")):
+1238            units = self._prev.text
+1239        return self.expression(exp.DataBlocksizeProperty, size=size, units=units)
+1240
+1241    def _parse_blockcompression(self) -> exp.Expression:
+1242        self._match_text_seq("BLOCKCOMPRESSION")
+1243        self._match(TokenType.EQ)
+1244        always = self._match(TokenType.ALWAYS)
+1245        manual = self._match_text_seq("MANUAL")
+1246        never = self._match_text_seq("NEVER")
+1247        default = self._match_text_seq("DEFAULT")
+1248        autotemp = None
+1249        if self._match_text_seq("AUTOTEMP"):
+1250            autotemp = self._parse_schema()
+1251
+1252        return self.expression(
+1253            exp.BlockCompressionProperty,
+1254            always=always,
+1255            manual=manual,
+1256            never=never,
+1257            default=default,
+1258            autotemp=autotemp,
+1259        )
+1260
+1261    def _parse_withisolatedloading(self) -> exp.Expression:
+1262        self._match(TokenType.WITH)
+1263        no = self._match_text_seq("NO")
+1264        concurrent = self._match_text_seq("CONCURRENT")
+1265        self._match_text_seq("ISOLATED", "LOADING")
+1266        for_all = self._match_text_seq("FOR", "ALL")
+1267        for_insert = self._match_text_seq("FOR", "INSERT")
+1268        for_none = self._match_text_seq("FOR", "NONE")
+1269        return self.expression(
+1270            exp.IsolatedLoadingProperty,
+1271            no=no,
+1272            concurrent=concurrent,
+1273            for_all=for_all,
+1274            for_insert=for_insert,
+1275            for_none=for_none,
+1276        )
+1277
+1278    def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]:
+1279        if self._match(TokenType.PARTITION_BY):
+1280            return self._parse_csv(self._parse_conjunction)
+1281        return []
+1282
+1283    def _parse_partitioned_by(self) -> exp.Expression:
+1284        self._match(TokenType.EQ)
+1285        return self.expression(
+1286            exp.PartitionedByProperty,
+1287            this=self._parse_schema() or self._parse_bracket(self._parse_field()),
+1288        )
+1289
    def _parse_distkey(self) -> exp.Expression:
        """Parse DISTKEY(<identifier>) into a DistKeyProperty."""
        return self.expression(exp.DistKeyProperty, this=self._parse_wrapped(self._parse_id_var))
+1292
+1293    def _parse_create_like(self) -> t.Optional[exp.Expression]:
+1294        table = self._parse_table(schema=True)
+1295        options = []
+1296        while self._match_texts(("INCLUDING", "EXCLUDING")):
+1297            this = self._prev.text.upper()
+1298            id_var = self._parse_id_var()
+1299
+1300            if not id_var:
+1301                return None
+1302
+1303            options.append(
+1304                self.expression(
+1305                    exp.Property,
+1306                    this=this,
+1307                    value=exp.Var(this=id_var.this.upper()),
+1308                )
+1309            )
+1310        return self.expression(exp.LikeProperty, this=table, expressions=options)
+1311
    def _parse_sortkey(self, compound: bool = False) -> exp.Expression:
        """Parse SORTKEY(<ids>); ``compound`` marks a COMPOUND SORTKEY (see _parse_property)."""
        return self.expression(
            exp.SortKeyProperty, this=self._parse_wrapped_csv(self._parse_id_var), compound=compound
        )
+1316
+1317    def _parse_character_set(self, default: bool = False) -> exp.Expression:
+1318        self._match(TokenType.EQ)
+1319        return self.expression(
+1320            exp.CharacterSetProperty, this=self._parse_var_or_string(), default=default
+1321        )
+1322
+1323    def _parse_returns(self) -> exp.Expression:
+1324        value: t.Optional[exp.Expression]
+1325        is_table = self._match(TokenType.TABLE)
+1326
+1327        if is_table:
+1328            if self._match(TokenType.LT):
+1329                value = self.expression(
+1330                    exp.Schema,
+1331                    this="TABLE",
+1332                    expressions=self._parse_csv(self._parse_struct_kwargs),
+1333                )
+1334                if not self._match(TokenType.GT):
+1335                    self.raise_error("Expecting >")
+1336            else:
+1337                value = self._parse_schema(exp.Var(this="TABLE"))
+1338        else:
+1339            value = self._parse_types()
+1340
+1341        return self.expression(exp.ReturnsProperty, this=value, is_table=is_table)
+1342
+1343    def _parse_describe(self) -> exp.Expression:
+1344        kind = self._match_set(self.CREATABLES) and self._prev.text
+1345        this = self._parse_table()
+1346
+1347        return self.expression(exp.Describe, this=this, kind=kind)
+1348
    def _parse_insert(self) -> exp.Expression:
        """Parse INSERT [OVERWRITE] [LOCAL] {DIRECTORY '<path>' | [INTO] [TABLE] <table>} ... ."""
        overwrite = self._match(TokenType.OVERWRITE)
        local = self._match(TokenType.LOCAL)

        this: t.Optional[exp.Expression]

        if self._match_text_seq("DIRECTORY"):
            # Hive: INSERT OVERWRITE [LOCAL] DIRECTORY '<path>' [ROW FORMAT ...]
            this = self.expression(
                exp.Directory,
                this=self._parse_var_or_string(),
                local=local,
                row_format=self._parse_row_format(match_row=True),
            )
        else:
            # INTO and TABLE are both optional noise words here
            self._match(TokenType.INTO)
            self._match(TokenType.TABLE)
            this = self._parse_table(schema=True)

        return self.expression(
            exp.Insert,
            this=this,
            exists=self._parse_exists(),
            partition=self._parse_partition(),
            expression=self._parse_ddl_select(),
            overwrite=overwrite,
        )
+1375
+1376    def _parse_row(self) -> t.Optional[exp.Expression]:
+1377        if not self._match(TokenType.FORMAT):
+1378            return None
+1379        return self._parse_row_format()
+1380
    def _parse_row_format(self, match_row: bool = False) -> t.Optional[exp.Expression]:
        """Parse Hive's ROW FORMAT SERDE '<class>' or ROW FORMAT DELIMITED ... clause.

        When `match_row` is set, the leading ROW FORMAT token pair must be
        present, otherwise None is returned.
        """
        if match_row and not self._match_pair(TokenType.ROW, TokenType.FORMAT):
            return None

        if self._match_text_seq("SERDE"):
            return self.expression(exp.RowFormatSerdeProperty, this=self._parse_string())

        self._match_text_seq("DELIMITED")

        kwargs = {}

        # Each DELIMITED sub-clause is optional; they are matched in this fixed order.
        if self._match_text_seq("FIELDS", "TERMINATED", "BY"):
            kwargs["fields"] = self._parse_string()
            if self._match_text_seq("ESCAPED", "BY"):
                kwargs["escaped"] = self._parse_string()
        if self._match_text_seq("COLLECTION", "ITEMS", "TERMINATED", "BY"):
            kwargs["collection_items"] = self._parse_string()
        if self._match_text_seq("MAP", "KEYS", "TERMINATED", "BY"):
            kwargs["map_keys"] = self._parse_string()
        if self._match_text_seq("LINES", "TERMINATED", "BY"):
            kwargs["lines"] = self._parse_string()
        if self._match_text_seq("NULL", "DEFINED", "AS"):
            kwargs["null"] = self._parse_string()

        return self.expression(exp.RowFormatDelimitedProperty, **kwargs)  # type: ignore
+1406
    def _parse_load_data(self) -> exp.Expression:
        """Parse Hive's LOAD DATA [LOCAL] INPATH '<path>' [OVERWRITE] INTO TABLE <t> ... statement."""
        local = self._match(TokenType.LOCAL)
        self._match_text_seq("INPATH")
        inpath = self._parse_string()
        overwrite = self._match(TokenType.OVERWRITE)
        self._match_pair(TokenType.INTO, TokenType.TABLE)

        # NOTE: keyword arguments are evaluated left-to-right, so the table,
        # partition, INPUTFORMAT and SERDE clauses are consumed in this order.
        return self.expression(
            exp.LoadData,
            this=self._parse_table(schema=True),
            local=local,
            overwrite=overwrite,
            inpath=inpath,
            partition=self._parse_partition(),
            input_format=self._match_text_seq("INPUTFORMAT") and self._parse_string(),
            serde=self._match_text_seq("SERDE") and self._parse_string(),
        )
+1424
+1425    def _parse_delete(self) -> exp.Expression:
+1426        self._match(TokenType.FROM)
+1427
+1428        return self.expression(
+1429            exp.Delete,
+1430            this=self._parse_table(schema=True),
+1431            using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()),
+1432            where=self._parse_where(),
+1433        )
+1434
+1435    def _parse_update(self) -> exp.Expression:
+1436        return self.expression(
+1437            exp.Update,
+1438            **{  # type: ignore
+1439                "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
+1440                "expressions": self._match(TokenType.SET) and self._parse_csv(self._parse_equality),
+1441                "from": self._parse_from(),
+1442                "where": self._parse_where(),
+1443            },
+1444        )
+1445
+1446    def _parse_uncache(self) -> exp.Expression:
+1447        if not self._match(TokenType.TABLE):
+1448            self.raise_error("Expecting TABLE after UNCACHE")
+1449
+1450        return self.expression(
+1451            exp.Uncache,
+1452            exists=self._parse_exists(),
+1453            this=self._parse_table(schema=True),
+1454        )
+1455
    def _parse_cache(self) -> exp.Expression:
        """Parse Spark-style CACHE [LAZY] TABLE <t> [OPTIONS('k' = 'v')] [AS <select>]."""
        lazy = self._match(TokenType.LAZY)
        self._match(TokenType.TABLE)
        table = self._parse_table(schema=True)
        options = []

        if self._match(TokenType.OPTIONS):
            # Only a single 'key' = 'value' pair is parsed here
            self._match_l_paren()
            k = self._parse_string()
            self._match(TokenType.EQ)
            v = self._parse_string()
            options = [k, v]
            self._match_r_paren()

        self._match(TokenType.ALIAS)
        return self.expression(
            exp.Cache,
            this=table,
            lazy=lazy,
            options=options,
            expression=self._parse_select(nested=True),
        )
+1478
+1479    def _parse_partition(self) -> t.Optional[exp.Expression]:
+1480        if not self._match(TokenType.PARTITION):
+1481            return None
+1482
+1483        return self.expression(
+1484            exp.Partition, expressions=self._parse_wrapped_csv(self._parse_conjunction)
+1485        )
+1486
+1487    def _parse_value(self) -> exp.Expression:
+1488        if self._match(TokenType.L_PAREN):
+1489            expressions = self._parse_csv(self._parse_conjunction)
+1490            self._match_r_paren()
+1491            return self.expression(exp.Tuple, expressions=expressions)
+1492
+1493        # In presto we can have VALUES 1, 2 which results in 1 column & 2 rows.
+1494        # Source: https://prestodb.io/docs/current/sql/values.html
+1495        return self.expression(exp.Tuple, expressions=[self._parse_conjunction()])
+1496
    def _parse_select(
        self, nested: bool = False, table: bool = False, parse_subquery_alias: bool = True
    ) -> t.Optional[exp.Expression]:
        """Parse a SELECT statement, a WITH-prefixed statement, a parenthesized
        subquery/table, or a VALUES clause, plus trailing set operations.

        Args:
            nested: allow a parenthesized nested select.
            table: allow a bare table reference inside parentheses.
            parse_subquery_alias: whether to parse an alias after a subquery.
        """
        cte = self._parse_with()
        if cte:
            this = self._parse_statement()

            if not this:
                self.raise_error("Failed to parse any statement following CTE")
                return cte  # only reached when raise_error is suppressed by error level

            if "with" in this.arg_types:
                this.set("with", cte)
            else:
                self.raise_error(f"{this.key} does not support CTE")
                this = cte
        elif self._match(TokenType.SELECT):
            comments = self._prev_comments

            hint = self._parse_hint()
            all_ = self._match(TokenType.ALL)
            distinct = self._match(TokenType.DISTINCT)

            if distinct:
                distinct = self.expression(
                    exp.Distinct,
                    on=self._parse_value() if self._match(TokenType.ON) else None,
                )

            if all_ and distinct:
                self.raise_error("Cannot specify both ALL and DISTINCT after SELECT")

            # TOP (e.g. T-SQL) appears before the projection list
            limit = self._parse_limit(top=True)
            expressions = self._parse_csv(self._parse_expression)

            this = self.expression(
                exp.Select,
                hint=hint,
                distinct=distinct,
                expressions=expressions,
                limit=limit,
            )
            this.comments = comments

            into = self._parse_into()
            if into:
                this.set("into", into)

            from_ = self._parse_from()
            if from_:
                this.set("from", from_)

            self._parse_query_modifiers(this)
        elif (table or nested) and self._match(TokenType.L_PAREN):
            this = self._parse_table() if table else self._parse_select(nested=True)
            self._parse_query_modifiers(this)
            this = self._parse_set_operations(this)
            self._match_r_paren()

            # early return so that subquery unions aren't parsed again
            # SELECT * FROM (SELECT 1) UNION ALL SELECT 1
            # Union ALL should be a property of the top select node, not the subquery
            return self._parse_subquery(this, parse_alias=parse_subquery_alias)
        elif self._match(TokenType.VALUES):
            this = self.expression(
                exp.Values,
                expressions=self._parse_csv(self._parse_value),
                alias=self._parse_table_alias(),
            )
        else:
            this = None

        return self._parse_set_operations(this)
+1570
    def _parse_with(self, skip_with_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse WITH [RECURSIVE] <cte> [, <cte> ...] into an exp.With node."""
        if not skip_with_token and not self._match(TokenType.WITH):
            return None

        recursive = self._match(TokenType.RECURSIVE)

        expressions = []
        while True:
            expressions.append(self._parse_cte())

            # Stop when neither a comma nor another WITH follows. After a comma,
            # a redundant WITH keyword is tolerated and consumed in the else arm.
            if not self._match(TokenType.COMMA) and not self._match(TokenType.WITH):
                break
            else:
                self._match(TokenType.WITH)

        return self.expression(exp.With, expressions=expressions, recursive=recursive)
+1587
+1588    def _parse_cte(self) -> exp.Expression:
+1589        alias = self._parse_table_alias()
+1590        if not alias or not alias.this:
+1591            self.raise_error("Expected CTE to have alias")
+1592
+1593        self._match(TokenType.ALIAS)
+1594
+1595        return self.expression(
+1596            exp.CTE,
+1597            this=self._parse_wrapped(self._parse_statement),
+1598            alias=alias,
+1599        )
+1600
+1601    def _parse_table_alias(
+1602        self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
+1603    ) -> t.Optional[exp.Expression]:
+1604        any_token = self._match(TokenType.ALIAS)
+1605        alias = self._parse_id_var(
+1606            any_token=any_token, tokens=alias_tokens or self.TABLE_ALIAS_TOKENS
+1607        )
+1608        index = self._index
+1609
+1610        if self._match(TokenType.L_PAREN):
+1611            columns = self._parse_csv(lambda: self._parse_column_def(self._parse_id_var()))
+1612            self._match_r_paren() if columns else self._retreat(index)
+1613        else:
+1614            columns = None
+1615
+1616        if not alias and not columns:
+1617            return None
+1618
+1619        return self.expression(exp.TableAlias, this=alias, columns=columns)
+1620
+1621    def _parse_subquery(
+1622        self, this: t.Optional[exp.Expression], parse_alias: bool = True
+1623    ) -> exp.Expression:
+1624        return self.expression(
+1625            exp.Subquery,
+1626            this=this,
+1627            pivots=self._parse_pivots(),
+1628            alias=self._parse_table_alias() if parse_alias else None,
+1629        )
+1630
    def _parse_query_modifiers(self, this: t.Optional[exp.Expression]) -> None:
        """Attach laterals, joins, and trailing clauses (WHERE, GROUP BY, ...) to `this` in place.

        No-op unless `this` is one of the MODIFIABLES expression types.
        """
        if not isinstance(this, self.MODIFIABLES):
            return

        table = isinstance(this, exp.Table)

        while True:
            lateral = self._parse_lateral()
            join = self._parse_join()
            # A comma directly after a bare table is not treated as a cross join here
            comma = None if table else self._match(TokenType.COMMA)
            if lateral:
                this.append("laterals", lateral)
            if join:
                this.append("joins", join)
            if comma:
                # Comma syntax adds another table to the existing FROM clause
                this.args["from"].append("expressions", self._parse_table())
            if not (lateral or join or comma):
                break

        # QUERY_MODIFIER_PARSERS maps clause arg name -> parser callable
        for key, parser in self.QUERY_MODIFIER_PARSERS.items():
            expression = parser(self)

            if expression:
                this.set(key, expression)
+1655
+1656    def _parse_hint(self) -> t.Optional[exp.Expression]:
+1657        if self._match(TokenType.HINT):
+1658            hints = self._parse_csv(self._parse_function)
+1659            if not self._match_pair(TokenType.STAR, TokenType.SLASH):
+1660                self.raise_error("Expected */ after HINT")
+1661            return self.expression(exp.Hint, expressions=hints)
+1662
+1663        return None
+1664
+1665    def _parse_into(self) -> t.Optional[exp.Expression]:
+1666        if not self._match(TokenType.INTO):
+1667            return None
+1668
+1669        temp = self._match(TokenType.TEMPORARY)
+1670        unlogged = self._match(TokenType.UNLOGGED)
+1671        self._match(TokenType.TABLE)
+1672
+1673        return self.expression(
+1674            exp.Into, this=self._parse_table(schema=True), temporary=temp, unlogged=unlogged
+1675        )
+1676
+1677    def _parse_from(self) -> t.Optional[exp.Expression]:
+1678        if not self._match(TokenType.FROM):
+1679            return None
+1680
+1681        return self.expression(
+1682            exp.From, comments=self._prev_comments, expressions=self._parse_csv(self._parse_table)
+1683        )
+1684
+1685    def _parse_match_recognize(self) -> t.Optional[exp.Expression]:
+1686        if not self._match(TokenType.MATCH_RECOGNIZE):
+1687            return None
+1688        self._match_l_paren()
+1689
+1690        partition = self._parse_partition_by()
+1691        order = self._parse_order()
+1692        measures = (
+1693            self._parse_alias(self._parse_conjunction())
+1694            if self._match_text_seq("MEASURES")
+1695            else None
+1696        )
+1697
+1698        if self._match_text_seq("ONE", "ROW", "PER", "MATCH"):
+1699            rows = exp.Var(this="ONE ROW PER MATCH")
+1700        elif self._match_text_seq("ALL", "ROWS", "PER", "MATCH"):
+1701            text = "ALL ROWS PER MATCH"
+1702            if self._match_text_seq("SHOW", "EMPTY", "MATCHES"):
+1703                text += f" SHOW EMPTY MATCHES"
+1704            elif self._match_text_seq("OMIT", "EMPTY", "MATCHES"):
+1705                text += f" OMIT EMPTY MATCHES"
+1706            elif self._match_text_seq("WITH", "UNMATCHED", "ROWS"):
+1707                text += f" WITH UNMATCHED ROWS"
+1708            rows = exp.Var(this=text)
+1709        else:
+1710            rows = None
+1711
+1712        if self._match_text_seq("AFTER", "MATCH", "SKIP"):
+1713            text = "AFTER MATCH SKIP"
+1714            if self._match_text_seq("PAST", "LAST", "ROW"):
+1715                text += f" PAST LAST ROW"
+1716            elif self._match_text_seq("TO", "NEXT", "ROW"):
+1717                text += f" TO NEXT ROW"
+1718            elif self._match_text_seq("TO", "FIRST"):
+1719                text += f" TO FIRST {self._advance_any().text}"  # type: ignore
+1720            elif self._match_text_seq("TO", "LAST"):
+1721                text += f" TO LAST {self._advance_any().text}"  # type: ignore
+1722            after = exp.Var(this=text)
+1723        else:
+1724            after = None
+1725
+1726        if self._match_text_seq("PATTERN"):
+1727            self._match_l_paren()
+1728
+1729            if not self._curr:
+1730                self.raise_error("Expecting )", self._curr)
+1731
+1732            paren = 1
+1733            start = self._curr
+1734
+1735            while self._curr and paren > 0:
+1736                if self._curr.token_type == TokenType.L_PAREN:
+1737                    paren += 1
+1738                if self._curr.token_type == TokenType.R_PAREN:
+1739                    paren -= 1
+1740                end = self._prev
+1741                self._advance()
+1742            if paren > 0:
+1743                self.raise_error("Expecting )", self._curr)
+1744            pattern = exp.Var(this=self._find_sql(start, end))
+1745        else:
+1746            pattern = None
+1747
+1748        define = (
+1749            self._parse_alias(self._parse_conjunction()) if self._match_text_seq("DEFINE") else None
+1750        )
+1751        self._match_r_paren()
+1752
+1753        return self.expression(
+1754            exp.MatchRecognize,
+1755            partition_by=partition,
+1756            order=order,
+1757            measures=measures,
+1758            rows=rows,
+1759            after=after,
+1760            pattern=pattern,
+1761            define=define,
+1762        )
+1763
    def _parse_lateral(self) -> t.Optional[exp.Expression]:
        """Parse LATERAL [VIEW] [OUTER] ... or OUTER/CROSS APPLY ... constructs.

        OUTER/CROSS APPLY is normalized to a Join wrapping the Lateral node.
        Returns None when no lateral-introducing tokens are present.
        """
        outer_apply = self._match_pair(TokenType.OUTER, TokenType.APPLY)
        cross_apply = self._match_pair(TokenType.CROSS, TokenType.APPLY)

        if outer_apply or cross_apply:
            this = self._parse_select(table=True)
            view = None
            outer = not cross_apply
        elif self._match(TokenType.LATERAL):
            this = self._parse_select(table=True)
            view = self._match(TokenType.VIEW)
            outer = self._match(TokenType.OUTER)
        else:
            return None

        if not this:
            # Not a subquery: parse a (possibly dot-qualified) function call or identifier
            this = self._parse_function() or self._parse_id_var(any_token=False)
            while self._match(TokenType.DOT):
                this = exp.Dot(
                    this=this,
                    expression=self._parse_function() or self._parse_id_var(any_token=False),
                )

        table_alias: t.Optional[exp.Expression]

        if view:
            # Hive LATERAL VIEW aliasing: <table> [AS col1, col2, ...]
            table = self._parse_id_var(any_token=False)
            columns = self._parse_csv(self._parse_id_var) if self._match(TokenType.ALIAS) else []
            table_alias = self.expression(exp.TableAlias, this=table, columns=columns)
        else:
            table_alias = self._parse_table_alias()

        expression = self.expression(
            exp.Lateral,
            this=this,
            view=view,
            outer=outer,
            alias=table_alias,
        )

        if outer_apply or cross_apply:
            return self.expression(exp.Join, this=expression, side=None if cross_apply else "LEFT")

        return expression
+1808
+1809    def _parse_join_side_and_kind(
+1810        self,
+1811    ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
+1812        return (
+1813            self._match(TokenType.NATURAL) and self._prev,
+1814            self._match_set(self.JOIN_SIDES) and self._prev,
+1815            self._match_set(self.JOIN_KINDS) and self._prev,
+1816        )
+1817
    def _parse_join(self, skip_join_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse a join clause: [NATURAL] [side] [kind] JOIN <table> [ON ... | USING (...)]."""
        natural, side, kind = self._parse_join_side_and_kind()

        if not skip_join_token and not self._match(TokenType.JOIN):
            return None

        kwargs: t.Dict[
            str, t.Optional[exp.Expression] | bool | str | t.List[t.Optional[exp.Expression]]
        ] = {"this": self._parse_table()}

        if natural:
            kwargs["natural"] = True
        if side:
            kwargs["side"] = side.text
        if kind:
            kwargs["kind"] = kind.text

        if self._match(TokenType.ON):
            kwargs["on"] = self._parse_conjunction()
        elif self._match(TokenType.USING):
            kwargs["using"] = self._parse_wrapped_id_vars()

        return self.expression(exp.Join, **kwargs)  # type: ignore
+1841
+1842    def _parse_index(self) -> exp.Expression:
+1843        index = self._parse_id_var()
+1844        self._match(TokenType.ON)
+1845        self._match(TokenType.TABLE)  # hive
+1846
+1847        return self.expression(
+1848            exp.Index,
+1849            this=index,
+1850            table=self.expression(exp.Table, this=self._parse_id_var()),
+1851            columns=self._parse_expression(),
+1852        )
+1853
+1854    def _parse_create_table_index(self) -> t.Optional[exp.Expression]:
+1855        unique = self._match(TokenType.UNIQUE)
+1856        primary = self._match_text_seq("PRIMARY")
+1857        amp = self._match_text_seq("AMP")
+1858        if not self._match(TokenType.INDEX):
+1859            return None
+1860        index = self._parse_id_var()
+1861        columns = None
+1862        if self._match(TokenType.L_PAREN, advance=False):
+1863            columns = self._parse_wrapped_csv(self._parse_column)
+1864        return self.expression(
+1865            exp.Index,
+1866            this=index,
+1867            columns=columns,
+1868            unique=unique,
+1869            primary=primary,
+1870            amp=amp,
+1871        )
+1872
    def _parse_table_parts(self, schema: bool = False) -> exp.Expression:
        """Parse a possibly dot-qualified table name into exp.Table.

        Handles `table`, `db.table`, `catalog.db.table`, and deeper qualification
        (extra parts become nested Dot expressions). A function call is allowed
        as the first part unless `schema` is set.
        """
        catalog = None
        db = None
        table = (not schema and self._parse_function()) or self._parse_id_var(any_token=False)

        while self._match(TokenType.DOT):
            if catalog:
                # This allows nesting the table in arbitrarily many dot expressions if needed
                table = self.expression(exp.Dot, this=table, expression=self._parse_id_var())
            else:
                # Shift qualifiers left: previous db becomes catalog, previous table becomes db
                catalog = db
                db = table
                table = self._parse_id_var()

        if not table:
            self.raise_error(f"Expected table name but got {self._curr}")

        return self.expression(
            exp.Table, this=table, db=db, catalog=catalog, pivots=self._parse_pivots()
        )
+1893
    def _parse_table(
        self, schema: bool = False, alias_tokens: t.Optional[t.Collection[TokenType]] = None
    ) -> t.Optional[exp.Expression]:
        """Parse any table-like expression: lateral, UNNEST, VALUES, subquery, or a plain table.

        Args:
            schema: parse as a schema'd table (column defs allowed, no function calls).
            alias_tokens: token types permitted as an alias (defaults to TABLE_ALIAS_TOKENS).
        """
        lateral = self._parse_lateral()

        if lateral:
            return lateral

        unnest = self._parse_unnest()

        if unnest:
            return unnest

        values = self._parse_derived_table_values()

        if values:
            return values

        subquery = self._parse_select(table=True)

        if subquery:
            return subquery

        this = self._parse_table_parts(schema=schema)

        if schema:
            return self._parse_schema(this=this)

        # Dialect flag: some dialects place the alias after TABLESAMPLE
        if self.alias_post_tablesample:
            table_sample = self._parse_table_sample()

        alias = self._parse_table_alias(alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS)

        if alias:
            this.set("alias", alias)

        # T-SQL style table hints: WITH (NOLOCK, ...)
        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
            this.set(
                "hints",
                self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)),
            )
            self._match_r_paren()

        if not self.alias_post_tablesample:
            table_sample = self._parse_table_sample()

        if table_sample:
            # TableSample wraps the table it samples
            table_sample.set("this", this)
            this = table_sample

        return this
+1945
    def _parse_unnest(self) -> t.Optional[exp.Expression]:
        """Parse UNNEST(expr, ...) [WITH ORDINALITY] [alias] [WITH OFFSET [AS] x]."""
        if not self._match(TokenType.UNNEST):
            return None

        expressions = self._parse_wrapped_csv(self._parse_column)
        ordinality = bool(self._match(TokenType.WITH) and self._match(TokenType.ORDINALITY))
        alias = self._parse_table_alias()

        if alias and self.unnest_column_only:
            # Dialects with unnest_column_only (e.g. set by the dialect) alias the
            # produced column rather than the table, so move the alias over
            if alias.args.get("columns"):
                self.raise_error("Unexpected extra column alias in unnest.")
            alias.set("columns", [alias.this])
            alias.set("this", None)

        offset = None
        if self._match_pair(TokenType.WITH, TokenType.OFFSET):
            self._match(TokenType.ALIAS)
            offset = self._parse_conjunction()

        return self.expression(
            exp.Unnest,
            expressions=expressions,
            ordinality=ordinality,
            alias=alias,
            offset=offset,
        )
+1972
+1973    def _parse_derived_table_values(self) -> t.Optional[exp.Expression]:
+1974        is_derived = self._match_pair(TokenType.L_PAREN, TokenType.VALUES)
+1975        if not is_derived and not self._match(TokenType.VALUES):
+1976            return None
+1977
+1978        expressions = self._parse_csv(self._parse_value)
+1979
+1980        if is_derived:
+1981            self._match_r_paren()
+1982
+1983        return self.expression(exp.Values, expressions=expressions, alias=self._parse_table_alias())
+1984
+1985    def _parse_table_sample(self) -> t.Optional[exp.Expression]:
+1986        if not self._match(TokenType.TABLE_SAMPLE):
+1987            return None
+1988
+1989        method = self._parse_var()
+1990        bucket_numerator = None
+1991        bucket_denominator = None
+1992        bucket_field = None
+1993        percent = None
+1994        rows = None
+1995        size = None
+1996        seed = None
+1997
+1998        self._match_l_paren()
+1999
+2000        if self._match(TokenType.BUCKET):
+2001            bucket_numerator = self._parse_number()
+2002            self._match(TokenType.OUT_OF)
+2003            bucket_denominator = bucket_denominator = self._parse_number()
+2004            self._match(TokenType.ON)
+2005            bucket_field = self._parse_field()
+2006        else:
+2007            num = self._parse_number()
+2008
+2009            if self._match(TokenType.PERCENT):
+2010                percent = num
+2011            elif self._match(TokenType.ROWS):
+2012                rows = num
+2013            else:
+2014                size = num
+2015
+2016        self._match_r_paren()
+2017
+2018        if self._match(TokenType.SEED):
+2019            seed = self._parse_wrapped(self._parse_number)
+2020
+2021        return self.expression(
+2022            exp.TableSample,
+2023            method=method,
+2024            bucket_numerator=bucket_numerator,
+2025            bucket_denominator=bucket_denominator,
+2026            bucket_field=bucket_field,
+2027            percent=percent,
+2028            rows=rows,
+2029            size=size,
+2030            seed=seed,
+2031        )
+2032
+2033    def _parse_pivots(self) -> t.List[t.Optional[exp.Expression]]:
+2034        return list(iter(self._parse_pivot, None))
+2035
    def _parse_pivot(self) -> t.Optional[exp.Expression]:
        """Parse one PIVOT/UNPIVOT clause.

        Retreats to the saved index and returns None when the PIVOT/UNPIVOT
        keyword is not followed by '(' (i.e. it wasn't actually a pivot).
        """
        index = self._index

        if self._match(TokenType.PIVOT):
            unpivot = False
        elif self._match(TokenType.UNPIVOT):
            unpivot = True
        else:
            return None

        expressions = []
        field = None

        if not self._match(TokenType.L_PAREN):
            self._retreat(index)
            return None

        if unpivot:
            expressions = self._parse_csv(self._parse_column)
        else:
            # PIVOT takes aggregate expressions, e.g. SUM(x) AS total
            expressions = self._parse_csv(lambda: self._parse_alias(self._parse_function()))

        if not self._match(TokenType.FOR):
            self.raise_error("Expecting FOR")

        value = self._parse_column()

        if not self._match(TokenType.IN):
            self.raise_error("Expecting IN")

        field = self._parse_in(value)

        self._match_r_paren()

        return self.expression(exp.Pivot, expressions=expressions, field=field, unpivot=unpivot)
+2071
+2072    def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Expression]:
+2073        if not skip_where_token and not self._match(TokenType.WHERE):
+2074            return None
+2075
+2076        return self.expression(
+2077            exp.Where, comments=self._prev_comments, this=self._parse_conjunction()
+2078        )
+2079
    def _parse_group(self, skip_group_by_token: bool = False) -> t.Optional[exp.Expression]:
        """Parse GROUP BY with optional GROUPING SETS, [WITH] CUBE, and [WITH] ROLLUP."""
        if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
            return None

        expressions = self._parse_csv(self._parse_conjunction)
        grouping_sets = self._parse_grouping_sets()

        self._match(TokenType.COMMA)
        with_ = self._match(TokenType.WITH)
        # WITH CUBE takes no column list; bare CUBE is followed by (cols)
        cube = self._match(TokenType.CUBE) and (
            with_ or self._parse_wrapped_csv(self._parse_column)
        )

        self._match(TokenType.COMMA)
        rollup = self._match(TokenType.ROLLUP) and (
            with_ or self._parse_wrapped_csv(self._parse_column)
        )

        return self.expression(
            exp.Group,
            expressions=expressions,
            grouping_sets=grouping_sets,
            cube=cube,
            rollup=rollup,
        )
+2105
+2106    def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+2107        if not self._match(TokenType.GROUPING_SETS):
+2108            return None
+2109
+2110        return self._parse_wrapped_csv(self._parse_grouping_set)
+2111
+2112    def _parse_grouping_set(self) -> t.Optional[exp.Expression]:
+2113        if self._match(TokenType.L_PAREN):
+2114            grouping_set = self._parse_csv(self._parse_column)
+2115            self._match_r_paren()
+2116            return self.expression(exp.Tuple, expressions=grouping_set)
+2117
+2118        return self._parse_column()
+2119
+2120    def _parse_having(self, skip_having_token: bool = False) -> t.Optional[exp.Expression]:
+2121        if not skip_having_token and not self._match(TokenType.HAVING):
+2122            return None
+2123        return self.expression(exp.Having, this=self._parse_conjunction())
+2124
+2125    def _parse_qualify(self) -> t.Optional[exp.Expression]:
+2126        if not self._match(TokenType.QUALIFY):
+2127            return None
+2128        return self.expression(exp.Qualify, this=self._parse_conjunction())
+2129
+2130    def _parse_order(
+2131        self, this: t.Optional[exp.Expression] = None, skip_order_token: bool = False
+2132    ) -> t.Optional[exp.Expression]:
+2133        if not skip_order_token and not self._match(TokenType.ORDER_BY):
+2134            return this
+2135
+2136        return self.expression(
+2137            exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
+2138        )
+2139
+2140    def _parse_sort(
+2141        self, token_type: TokenType, exp_class: t.Type[exp.Expression]
+2142    ) -> t.Optional[exp.Expression]:
+2143        if not self._match(token_type):
+2144            return None
+2145        return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
+2146
    def _parse_ordered(self) -> exp.Expression:
        """Parse one ORDER BY element with ASC/DESC and NULLS FIRST/LAST.

        When null ordering is not explicit, `nulls_first` is derived from the
        dialect's `null_ordering` setting so generated SQL round-trips.
        """
        this = self._parse_conjunction()
        self._match(TokenType.ASC)  # ASC is the default; just consume it
        is_desc = self._match(TokenType.DESC)
        is_nulls_first = self._match(TokenType.NULLS_FIRST)
        is_nulls_last = self._match(TokenType.NULLS_LAST)
        desc = is_desc or False
        asc = not desc
        nulls_first = is_nulls_first or False
        explicitly_null_ordered = is_nulls_first or is_nulls_last
        if (
            not explicitly_null_ordered
            and (
                (asc and self.null_ordering == "nulls_are_small")
                or (desc and self.null_ordering != "nulls_are_small")
            )
            and self.null_ordering != "nulls_are_last"
        ):
            nulls_first = True

        return self.expression(exp.Ordered, this=this, desc=desc, nulls_first=nulls_first)
+2168
    def _parse_limit(
        self, this: t.Optional[exp.Expression] = None, top: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse a LIMIT clause (or TOP when `top=True`), or an ANSI FETCH clause.

        Returns `this` unchanged when neither construct is present.
        """
        if self._match(TokenType.TOP if top else TokenType.LIMIT):
            # TOP may be parenthesized, e.g. SELECT TOP (10).
            limit_paren = self._match(TokenType.L_PAREN)
            limit_exp = self.expression(
                exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term()
            )

            if limit_paren:
                self._match_r_paren()

            return limit_exp

        if self._match(TokenType.FETCH):
            direction = self._match_set((TokenType.FIRST, TokenType.NEXT))
            direction = self._prev.text if direction else "FIRST"
            count = self._parse_number()
            # ROW/ROWS and ONLY are optional noise words in FETCH FIRST n ROWS ONLY.
            self._match_set((TokenType.ROW, TokenType.ROWS))
            self._match(TokenType.ONLY)
            return self.expression(exp.Fetch, direction=direction, count=count)

        return this
+2192
+2193    def _parse_offset(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
+2194        if not self._match_set((TokenType.OFFSET, TokenType.COMMA)):
+2195            return this
+2196
+2197        count = self._parse_number()
+2198        self._match_set((TokenType.ROW, TokenType.ROWS))
+2199        return self.expression(exp.Offset, this=this, expression=count)
+2200
+2201    def _parse_lock(self) -> t.Optional[exp.Expression]:
+2202        if self._match_text_seq("FOR", "UPDATE"):
+2203            return self.expression(exp.Lock, update=True)
+2204        if self._match_text_seq("FOR", "SHARE"):
+2205            return self.expression(exp.Lock, update=False)
+2206
+2207        return None
+2208
    def _parse_set_operations(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse trailing UNION / EXCEPT / INTERSECT operations onto `this`.

        Recurses on the right-hand side, so chained set operations nest to the
        right.
        """
        if not self._match_set(self.SET_OPERATIONS):
            return this

        token_type = self._prev.token_type

        if token_type == TokenType.UNION:
            expression = exp.Union
        elif token_type == TokenType.EXCEPT:
            expression = exp.Except
        else:
            expression = exp.Intersect

        return self.expression(
            expression,
            this=this,
            # DISTINCT is the default unless ALL is given explicitly.
            distinct=self._match(TokenType.DISTINCT) or not self._match(TokenType.ALL),
            expression=self._parse_set_operations(self._parse_select(nested=True)),
        )
+2228
+2229    def _parse_expression(self) -> t.Optional[exp.Expression]:
+2230        return self._parse_alias(self._parse_conjunction())
+2231
+2232    def _parse_conjunction(self) -> t.Optional[exp.Expression]:
+2233        return self._parse_tokens(self._parse_equality, self.CONJUNCTION)
+2234
+2235    def _parse_equality(self) -> t.Optional[exp.Expression]:
+2236        return self._parse_tokens(self._parse_comparison, self.EQUALITY)
+2237
+2238    def _parse_comparison(self) -> t.Optional[exp.Expression]:
+2239        return self._parse_tokens(self._parse_range, self.COMPARISON)
+2240
    def _parse_range(self) -> t.Optional[exp.Expression]:
        """Parse range-level predicates: registered range operators
        (BETWEEN/IN/LIKE etc.), ISNULL/NOTNULL shorthands, an optional leading
        NOT, and trailing IS predicates.
        """
        this = self._parse_bitwise()
        negate = self._match(TokenType.NOT)

        if self._match_set(self.RANGE_PARSERS):
            this = self.RANGE_PARSERS[self._prev.token_type](self, this)
        elif self._match(TokenType.ISNULL):
            this = self.expression(exp.Is, this=this, expression=exp.Null())

        # Postgres supports ISNULL and NOTNULL for conditions.
        # https://blog.andreiavram.ro/postgresql-null-composite-type/
        if self._match(TokenType.NOTNULL):
            this = self.expression(exp.Is, this=this, expression=exp.Null())
            this = self.expression(exp.Not, this=this)

        # Apply the NOT captured before the range operator (e.g. NOT BETWEEN).
        if negate:
            this = self.expression(exp.Not, this=this)

        if self._match(TokenType.IS):
            this = self._parse_is(this)

        return this
+2263
+2264    def _parse_is(self, this: t.Optional[exp.Expression]) -> exp.Expression:
+2265        negate = self._match(TokenType.NOT)
+2266        if self._match(TokenType.DISTINCT_FROM):
+2267            klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
+2268            return self.expression(klass, this=this, expression=self._parse_expression())
+2269
+2270        this = self.expression(
+2271            exp.Is,
+2272            this=this,
+2273            expression=self._parse_null() or self._parse_boolean(),
+2274        )
+2275        return self.expression(exp.Not, this=this) if negate else this
+2276
    def _parse_in(self, this: t.Optional[exp.Expression]) -> exp.Expression:
        """Parse the right-hand side of an IN predicate: UNNEST(...), a
        parenthesized subquery or value list, or a bare field (Hive-style).
        """
        unnest = self._parse_unnest()
        if unnest:
            this = self.expression(exp.In, this=this, unnest=unnest)
        elif self._match(TokenType.L_PAREN):
            expressions = self._parse_csv(self._parse_select_or_expression)

            # A single subqueryable becomes IN (subquery); otherwise a value list.
            if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
                this = self.expression(exp.In, this=this, query=expressions[0])
            else:
                this = self.expression(exp.In, this=this, expressions=expressions)

            self._match_r_paren()
        else:
            this = self.expression(exp.In, this=this, field=self._parse_field())

        return this
+2294
+2295    def _parse_between(self, this: exp.Expression) -> exp.Expression:
+2296        low = self._parse_bitwise()
+2297        self._match(TokenType.AND)
+2298        high = self._parse_bitwise()
+2299        return self.expression(exp.Between, this=this, low=low, high=high)
+2300
+2301    def _parse_escape(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2302        if not self._match(TokenType.ESCAPE):
+2303            return this
+2304        return self.expression(exp.Escape, this=this, expression=self._parse_string())
+2305
    def _parse_bitwise(self) -> t.Optional[exp.Expression]:
        """Parse left-associative bitwise operators, including << and >>
        shifts, which arrive as two consecutive LT/GT tokens.
        """
        this = self._parse_term()

        while True:
            if self._match_set(self.BITWISE):
                this = self.expression(
                    self.BITWISE[self._prev.token_type],
                    this=this,
                    expression=self._parse_term(),
                )
            elif self._match_pair(TokenType.LT, TokenType.LT):
                this = self.expression(
                    exp.BitwiseLeftShift, this=this, expression=self._parse_term()
                )
            elif self._match_pair(TokenType.GT, TokenType.GT):
                this = self.expression(
                    exp.BitwiseRightShift, this=this, expression=self._parse_term()
                )
            else:
                break

        return this
+2328
+2329    def _parse_term(self) -> t.Optional[exp.Expression]:
+2330        return self._parse_tokens(self._parse_factor, self.TERM)
+2331
+2332    def _parse_factor(self) -> t.Optional[exp.Expression]:
+2333        return self._parse_tokens(self._parse_unary, self.FACTOR)
+2334
+2335    def _parse_unary(self) -> t.Optional[exp.Expression]:
+2336        if self._match_set(self.UNARY_PARSERS):
+2337            return self.UNARY_PARSERS[self._prev.token_type](self)
+2338        return self._parse_at_time_zone(self._parse_type())
+2339
    def _parse_type(self) -> t.Optional[exp.Expression]:
        """Parse INTERVAL literals, implicit casts (`TYPE expr`), or a column.

        Uses self._retreat to backtrack when a leading type token turns out
        not to introduce a cast.
        """
        if self._match(TokenType.INTERVAL):
            return self.expression(exp.Interval, this=self._parse_term(), unit=self._parse_var())

        index = self._index
        type_token = self._parse_types(check_func=True)
        this = self._parse_column()

        if type_token:
            # `TYPE expr` (but not `TYPE *`) is treated as a cast.
            if this and not isinstance(this, exp.Star):
                return self.expression(exp.Cast, this=this, to=type_token)
            if not type_token.args.get("expressions"):
                self._retreat(index)
                return self._parse_column()
            return type_token

        return this
+2357
    def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
        """Parse a (possibly nested) data type into an exp.DataType.

        Returns None and rewinds the token stream when the upcoming tokens do
        not form a type. With `check_func=True`, a type name that could also
        be a function call is only accepted when followed by a string literal
        (e.g. DATE '2020-01-01').
        """
        index = self._index

        if not self._match_set(self.TYPE_TOKENS):
            return None

        type_token = self._prev.token_type

        if type_token == TokenType.PSEUDO_TYPE:
            return self.expression(exp.PseudoType, this=self._prev.text)

        nested = type_token in self.NESTED_TYPE_TOKENS
        is_struct = type_token == TokenType.STRUCT
        expressions = None
        maybe_func = False

        if self._match(TokenType.L_PAREN):
            if is_struct:
                expressions = self._parse_csv(self._parse_struct_kwargs)
            elif nested:
                expressions = self._parse_csv(self._parse_types)
            else:
                expressions = self._parse_csv(self._parse_conjunction)

            if not expressions:
                self._retreat(index)
                return None

            self._match_r_paren()
            # A parenthesized argument list means this may actually be a function call.
            maybe_func = True

        # Postgres-style array suffixes: INT[], INT[][], ...
        if not nested and self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
            this = exp.DataType(
                this=exp.DataType.Type.ARRAY,
                expressions=[exp.DataType.build(type_token.value, expressions=expressions)],
                nested=True,
            )

            while self._match_pair(TokenType.L_BRACKET, TokenType.R_BRACKET):
                this = exp.DataType(
                    this=exp.DataType.Type.ARRAY,
                    expressions=[this],
                    nested=True,
                )

            return this

        # A lone opening bracket here means we misread the tokens; back out.
        if self._match(TokenType.L_BRACKET):
            self._retreat(index)
            return None

        values: t.Optional[t.List[t.Optional[exp.Expression]]] = None
        if nested and self._match(TokenType.LT):
            if is_struct:
                expressions = self._parse_csv(self._parse_struct_kwargs)
            else:
                expressions = self._parse_csv(self._parse_types)

            if not self._match(TokenType.GT):
                self.raise_error("Expecting >")

            # Optional literal values following the type, e.g. ARRAY<INT>[1, 2].
            if self._match_set((TokenType.L_BRACKET, TokenType.L_PAREN)):
                values = self._parse_csv(self._parse_conjunction)
                self._match_set((TokenType.R_BRACKET, TokenType.R_PAREN))

        value: t.Optional[exp.Expression] = None
        if type_token in self.TIMESTAMPS:
            if self._match(TokenType.WITH_TIME_ZONE) or type_token == TokenType.TIMESTAMPTZ:
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPTZ, expressions=expressions)
            elif (
                self._match(TokenType.WITH_LOCAL_TIME_ZONE) or type_token == TokenType.TIMESTAMPLTZ
            ):
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMPLTZ, expressions=expressions)
            elif self._match(TokenType.WITHOUT_TIME_ZONE):
                if type_token == TokenType.TIME:
                    value = exp.DataType(this=exp.DataType.Type.TIME, expressions=expressions)
                else:
                    value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)

            # An explicit time-zone qualifier rules out the function-call reading.
            maybe_func = maybe_func and value is None

            if value is None:
                value = exp.DataType(this=exp.DataType.Type.TIMESTAMP, expressions=expressions)
        elif type_token == TokenType.INTERVAL:
            value = self.expression(exp.Interval, unit=self._parse_var())

        if maybe_func and check_func:
            index2 = self._index
            peek = self._parse_string()

            # No following string literal: treat as a function call, rewind fully.
            if not peek:
                self._retreat(index)
                return None

            self._retreat(index2)

        if value:
            return value

        return exp.DataType(
            this=exp.DataType.Type[type_token.value.upper()],
            expressions=expressions,
            nested=nested,
            values=values,
        )
+2463
+2464    def _parse_struct_kwargs(self) -> t.Optional[exp.Expression]:
+2465        if self._curr and self._curr.token_type in self.TYPE_TOKENS:
+2466            return self._parse_types()
+2467
+2468        this = self._parse_id_var()
+2469        self._match(TokenType.COLON)
+2470        data_type = self._parse_types()
+2471
+2472        if not data_type:
+2473            return None
+2474        return self.expression(exp.StructKwarg, this=this, expression=data_type)
+2475
+2476    def _parse_at_time_zone(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2477        if not self._match(TokenType.AT_TIME_ZONE):
+2478            return this
+2479        return self.expression(exp.AtTimeZone, this=this, zone=self._parse_unary())
+2480
    def _parse_column(self) -> t.Optional[exp.Expression]:
        """Parse a column reference, including dotted qualification,
        Postgres-style :: casts, bracket subscripts, and any dialect-specific
        column operators registered in COLUMN_OPERATORS.
        """
        this = self._parse_field()
        if isinstance(this, exp.Identifier):
            this = self.expression(exp.Column, this=this)
        elif not this:
            return self._parse_bracket(this)
        this = self._parse_bracket(this)

        while self._match_set(self.COLUMN_OPERATORS):
            op_token = self._prev.token_type
            op = self.COLUMN_OPERATORS.get(op_token)

            if op_token == TokenType.DCOLON:
                # expr::type cast — the right-hand side must be a type.
                field = self._parse_types()
                if not field:
                    self.raise_error("Expected type")
            elif op:
                self._advance()
                value = self._prev.text
                field = (
                    exp.Literal.number(value)
                    if self._prev.token_type == TokenType.NUMBER
                    else exp.Literal.string(value)
                )
            else:
                field = self._parse_star() or self._parse_function() or self._parse_id_var()

            if isinstance(field, exp.Func):
                # bigquery allows function calls like x.y.count(...)
                # SAFE.SUBSTR(...)
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
                this = self._replace_columns_with_dots(this)

            if op:
                this = op(self, this, field)
            elif isinstance(this, exp.Column) and not this.table:
                this = self.expression(exp.Column, this=field, table=this.this)
            else:
                this = self.expression(exp.Dot, this=this, expression=field)
            this = self._parse_bracket(this)

        return this
+2523
    def _parse_primary(self) -> t.Optional[exp.Expression]:
        """Parse a primary expression: a registered literal, an implicitly
        concatenated string run, a `.5`-style number, or a parenthesized
        expression / tuple / subquery.
        """
        if self._match_set(self.PRIMARY_PARSERS):
            token_type = self._prev.token_type
            primary = self.PRIMARY_PARSERS[token_type](self, self._prev)

            if token_type == TokenType.STRING:
                # Adjacent string literals concatenate: 'a' 'b' -> CONCAT('a', 'b').
                expressions = [primary]
                while self._match(TokenType.STRING):
                    expressions.append(exp.Literal.string(self._prev.text))
                if len(expressions) > 1:
                    return self.expression(exp.Concat, expressions=expressions)
            return primary

        if self._match_pair(TokenType.DOT, TokenType.NUMBER):
            return exp.Literal.number(f"0.{self._prev.text}")

        if self._match(TokenType.L_PAREN):
            comments = self._prev_comments
            query = self._parse_select()

            if query:
                expressions = [query]
            else:
                expressions = self._parse_csv(
                    lambda: self._parse_alias(self._parse_conjunction(), explicit=True)
                )

            this = seq_get(expressions, 0)
            self._parse_query_modifiers(this)
            self._match_r_paren()

            # Decide between subquery, tuple, and plain parenthesized expression.
            if isinstance(this, exp.Subqueryable):
                this = self._parse_set_operations(
                    self._parse_subquery(this=this, parse_alias=False)
                )
            elif len(expressions) > 1:
                this = self.expression(exp.Tuple, expressions=expressions)
            else:
                this = self.expression(exp.Paren, this=this)

            # Re-attach comments collected from the opening paren.
            if this and comments:
                this.comments = comments

            return this

        return None
+2570
+2571    def _parse_field(self, any_token: bool = False) -> t.Optional[exp.Expression]:
+2572        return self._parse_primary() or self._parse_function() or self._parse_id_var(any_token)
+2573
    def _parse_function(
        self, functions: t.Optional[t.Dict[str, t.Callable]] = None
    ) -> t.Optional[exp.Expression]:
        """Parse a function call.

        Dispatch order: paren-less special parsers, paren-less known functions,
        FUNCTION_PARSERS special cases, subquery predicates like
        EXISTS(SELECT ...), then known functions (validated) or unknown ones
        (exp.Anonymous). `functions` optionally overrides self.FUNCTIONS.
        """
        if not self._curr:
            return None

        token_type = self._curr.token_type

        if self._match_set(self.NO_PAREN_FUNCTION_PARSERS):
            return self.NO_PAREN_FUNCTION_PARSERS[token_type](self)

        if not self._next or self._next.token_type != TokenType.L_PAREN:
            if token_type in self.NO_PAREN_FUNCTIONS:
                self._advance()
                return self.expression(self.NO_PAREN_FUNCTIONS[token_type])

            return None

        if token_type not in self.FUNC_TOKENS:
            return None

        this = self._curr.text
        upper = this.upper()
        # Consume the function name and its opening paren.
        self._advance(2)

        parser = self.FUNCTION_PARSERS.get(upper)

        if parser:
            this = parser(self)
        else:
            subquery_predicate = self.SUBQUERY_PREDICATES.get(token_type)

            if subquery_predicate and self._curr.token_type in (TokenType.SELECT, TokenType.WITH):
                this = self.expression(subquery_predicate, this=self._parse_select())
                self._match_r_paren()
                return this

            if functions is None:
                functions = self.FUNCTIONS

            function = functions.get(upper)
            args = self._parse_csv(self._parse_lambda)

            if function:
                # Clickhouse supports function calls like foo(x, y)(z), so for these we need to also parse the
                # second parameter list (i.e. "(z)") and the corresponding function will receive both arg lists.
                if count_params(function) == 2:
                    params = None
                    if self._match_pair(TokenType.R_PAREN, TokenType.L_PAREN):
                        params = self._parse_csv(self._parse_lambda)

                    this = function(args, params)
                else:
                    this = function(args)

                self.validate_expression(this, args)
            else:
                this = self.expression(exp.Anonymous, this=this, expressions=args)

        self._match_r_paren(this)
        return self._parse_window(this)
+2635
    def _parse_user_defined_function(
        self, kind: t.Optional[TokenType] = None
    ) -> t.Optional[exp.Expression]:
        """Parse a UDF reference: a dotted name plus an optional wrapped kwarg
        list.

        `kind` exists for dialect overrides and is not used in this body.
        """
        this = self._parse_id_var()

        # Consume dotted qualifiers, e.g. schema.func.
        while self._match(TokenType.DOT):
            this = self.expression(exp.Dot, this=this, expression=self._parse_id_var())

        if not self._match(TokenType.L_PAREN):
            return this

        expressions = self._parse_csv(self._parse_udf_kwarg)
        self._match_r_paren()
        return self.expression(
            exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
        )
+2652
+2653    def _parse_introducer(self, token: Token) -> t.Optional[exp.Expression]:
+2654        literal = self._parse_primary()
+2655        if literal:
+2656            return self.expression(exp.Introducer, this=token.text, expression=literal)
+2657
+2658        return self.expression(exp.Identifier, this=token.text)
+2659
+2660    def _parse_national(self, token: Token) -> exp.Expression:
+2661        return self.expression(exp.National, this=exp.Literal.string(token.text))
+2662
+2663    def _parse_session_parameter(self) -> exp.Expression:
+2664        kind = None
+2665        this = self._parse_id_var() or self._parse_primary()
+2666
+2667        if this and self._match(TokenType.DOT):
+2668            kind = this.name
+2669            this = self._parse_var() or self._parse_primary()
+2670
+2671        return self.expression(exp.SessionParameter, this=this, kind=kind)
+2672
+2673    def _parse_udf_kwarg(self) -> t.Optional[exp.Expression]:
+2674        this = self._parse_id_var()
+2675        kind = self._parse_types()
+2676
+2677        if not kind:
+2678            return this
+2679
+2680        return self.expression(exp.UserDefinedFunctionKwarg, this=this, kind=kind)
+2681
    def _parse_lambda(self) -> t.Optional[exp.Expression]:
        """Parse a lambda (e.g. `x -> ...`), or fall back to a regular function
        argument: a DISTINCT list or select/expression, with optional
        IGNORE/RESPECT NULLS and trailing ORDER BY / LIMIT modifiers.
        """
        index = self._index

        if self._match(TokenType.L_PAREN):
            expressions = self._parse_csv(self._parse_id_var)

            if not self._match(TokenType.R_PAREN):
                self._retreat(index)
        else:
            expressions = [self._parse_id_var()]

        if self._match_set(self.LAMBDAS):
            return self.LAMBDAS[self._prev.token_type](self, expressions)

        # Not a lambda after all; rewind and parse as a regular argument.
        self._retreat(index)

        this: t.Optional[exp.Expression]

        if self._match(TokenType.DISTINCT):
            this = self.expression(
                exp.Distinct, expressions=self._parse_csv(self._parse_conjunction)
            )
        else:
            this = self._parse_select_or_expression()

        if self._match(TokenType.IGNORE_NULLS):
            this = self.expression(exp.IgnoreNulls, this=this)
        else:
            # RESPECT NULLS is the default; consume it silently if present.
            self._match(TokenType.RESPECT_NULLS)

        return self._parse_limit(self._parse_order(this))
+2713
    def _parse_schema(self, this: t.Optional[exp.Expression] = None) -> t.Optional[exp.Expression]:
        """Parse a parenthesized schema (column defs / constraints) after `this`.

        Rewinds and returns `this` untouched when the parens actually open a
        SELECT (a subquery, not a schema).
        """
        index = self._index
        if not self._match(TokenType.L_PAREN) or self._match(TokenType.SELECT):
            self._retreat(index)
            return this

        args = self._parse_csv(
            lambda: self._parse_constraint()
            or self._parse_column_def(self._parse_field(any_token=True))
        )
        self._match_r_paren()
        return self.expression(exp.Schema, this=this, expressions=args)
+2726
+2727    def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2728        kind = self._parse_types()
+2729
+2730        constraints = []
+2731        while True:
+2732            constraint = self._parse_column_constraint()
+2733            if not constraint:
+2734                break
+2735            constraints.append(constraint)
+2736
+2737        if not kind and not constraints:
+2738            return this
+2739
+2740        return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
+2741
    def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
        """Parse one column constraint (identity, check, collate, default,
        null-ness, comment, key, generated, ...), optionally preceded by
        CONSTRAINT <name>.

        Returns a REFERENCES expression directly when present, an
        exp.ColumnConstraint for any other recognized constraint, or the parsed
        name / None when no constraint keyword follows.
        """
        this = self._parse_references()

        if this:
            return this

        if self._match(TokenType.CONSTRAINT):
            this = self._parse_id_var()

        kind: exp.Expression

        if self._match_set((TokenType.AUTO_INCREMENT, TokenType.IDENTITY)):
            start = None
            increment = None

            # IDENTITY(start, increment) or START ... INCREMENT ... forms.
            if self._match(TokenType.L_PAREN, advance=False):
                args = self._parse_wrapped_csv(self._parse_bitwise)
                start = seq_get(args, 0)
                increment = seq_get(args, 1)
            elif self._match_text_seq("START"):
                start = self._parse_bitwise()
                self._match_text_seq("INCREMENT")
                increment = self._parse_bitwise()

            if start and increment:
                kind = exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
            else:
                kind = exp.AutoIncrementColumnConstraint()
        elif self._match(TokenType.CHECK):
            constraint = self._parse_wrapped(self._parse_conjunction)
            kind = self.expression(exp.CheckColumnConstraint, this=constraint)
        elif self._match(TokenType.COLLATE):
            kind = self.expression(exp.CollateColumnConstraint, this=self._parse_var())
        elif self._match(TokenType.ENCODE):
            kind = self.expression(exp.EncodeColumnConstraint, this=self._parse_var())
        elif self._match(TokenType.DEFAULT):
            kind = self.expression(exp.DefaultColumnConstraint, this=self._parse_bitwise())
        elif self._match_pair(TokenType.NOT, TokenType.NULL):
            kind = exp.NotNullColumnConstraint()
        elif self._match(TokenType.NULL):
            # An explicit NULL constraint is modeled as NOT NULL with allow_null set.
            kind = exp.NotNullColumnConstraint(allow_null=True)
        elif self._match(TokenType.SCHEMA_COMMENT):
            kind = self.expression(exp.CommentColumnConstraint, this=self._parse_string())
        elif self._match(TokenType.PRIMARY_KEY):
            desc = None
            if self._match(TokenType.ASC) or self._match(TokenType.DESC):
                desc = self._prev.token_type == TokenType.DESC
            kind = exp.PrimaryKeyColumnConstraint(desc=desc)
        elif self._match(TokenType.UNIQUE):
            kind = exp.UniqueColumnConstraint()
        elif self._match(TokenType.GENERATED):
            if self._match(TokenType.BY_DEFAULT):
                kind = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=False)
            else:
                self._match(TokenType.ALWAYS)
                kind = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)
            self._match_pair(TokenType.ALIAS, TokenType.IDENTITY)

            # Optional (START WITH n INCREMENT BY m) suffix.
            if self._match(TokenType.L_PAREN):
                if self._match_text_seq("START", "WITH"):
                    kind.set("start", self._parse_bitwise())
                if self._match_text_seq("INCREMENT", "BY"):
                    kind.set("increment", self._parse_bitwise())

                self._match_r_paren()
        else:
            return this

        return self.expression(exp.ColumnConstraint, this=this, kind=kind)
+2811
+2812    def _parse_constraint(self) -> t.Optional[exp.Expression]:
+2813        if not self._match(TokenType.CONSTRAINT):
+2814            return self._parse_unnamed_constraint()
+2815
+2816        this = self._parse_id_var()
+2817        expressions = []
+2818
+2819        while True:
+2820            constraint = self._parse_unnamed_constraint() or self._parse_function()
+2821            if not constraint:
+2822                break
+2823            expressions.append(constraint)
+2824
+2825        return self.expression(exp.Constraint, this=this, expressions=expressions)
+2826
+2827    def _parse_unnamed_constraint(self) -> t.Optional[exp.Expression]:
+2828        if not self._match_set(self.CONSTRAINT_PARSERS):
+2829            return None
+2830        return self.CONSTRAINT_PARSERS[self._prev.token_type](self)
+2831
+2832    def _parse_unique(self) -> exp.Expression:
+2833        return self.expression(exp.Unique, expressions=self._parse_wrapped_id_vars())
+2834
    def _parse_key_constraint_options(self) -> t.List[str]:
        """Collect trailing key-constraint options (ON <event> <action>,
        enforcement, deferrability, match) as plain SQL strings.

        Raises via self.raise_error when an ON clause has no recognized action.
        """
        options = []
        while True:
            if not self._curr:
                break

            if self._match(TokenType.ON):
                action = None
                # The word right after ON (e.g. DELETE or UPDATE).
                on = self._advance_any() and self._prev.text

                if self._match(TokenType.NO_ACTION):
                    action = "NO ACTION"
                elif self._match(TokenType.CASCADE):
                    action = "CASCADE"
                elif self._match_pair(TokenType.SET, TokenType.NULL):
                    action = "SET NULL"
                elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
                    action = "SET DEFAULT"
                else:
                    self.raise_error("Invalid key constraint")

                options.append(f"ON {on} {action}")
            elif self._match_text_seq("NOT", "ENFORCED"):
                options.append("NOT ENFORCED")
            elif self._match_text_seq("DEFERRABLE"):
                options.append("DEFERRABLE")
            elif self._match_text_seq("INITIALLY", "DEFERRED"):
                options.append("INITIALLY DEFERRED")
            elif self._match_text_seq("NORELY"):
                options.append("NORELY")
            elif self._match_text_seq("MATCH", "FULL"):
                options.append("MATCH FULL")
            else:
                break

        return options
+2871
+2872    def _parse_references(self) -> t.Optional[exp.Expression]:
+2873        if not self._match(TokenType.REFERENCES):
+2874            return None
+2875
+2876        expressions = None
+2877        this = self._parse_id_var()
+2878
+2879        if self._match(TokenType.L_PAREN, advance=False):
+2880            expressions = self._parse_wrapped_id_vars()
+2881
+2882        options = self._parse_key_constraint_options()
+2883        return self.expression(exp.Reference, this=this, expressions=expressions, options=options)
+2884
    def _parse_foreign_key(self) -> exp.Expression:
        """Parse a FOREIGN KEY constraint: column list, REFERENCES clause, and
        any ON DELETE / ON UPDATE actions (passed through as delete=/update=
        keyword args on the exp.ForeignKey).
        """
        expressions = self._parse_wrapped_id_vars()
        reference = self._parse_references()
        options = {}

        while self._match(TokenType.ON):
            if not self._match_set((TokenType.DELETE, TokenType.UPDATE)):
                self.raise_error("Expected DELETE or UPDATE")

            kind = self._prev.text.lower()

            if self._match(TokenType.NO_ACTION):
                action = "NO ACTION"
            elif self._match(TokenType.SET):
                self._match_set((TokenType.NULL, TokenType.DEFAULT))
                action = "SET " + self._prev.text.upper()
            else:
                # Any other single token (e.g. CASCADE) is taken verbatim.
                self._advance()
                action = self._prev.text.upper()

            options[kind] = action

        return self.expression(
            exp.ForeignKey, expressions=expressions, reference=reference, **options  # type: ignore
        )
+2910
+2911    def _parse_primary_key(self) -> exp.Expression:
+2912        expressions = self._parse_wrapped_id_vars()
+2913        options = self._parse_key_constraint_options()
+2914        return self.expression(exp.PrimaryKey, expressions=expressions, options=options)
+2915
    def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Parse a trailing [...] or {...} after `this`.

        Produces a Struct for {...}, an Array for a bare bracket or ARRAY[...],
        and a Bracket (subscript) otherwise; recurses to support chained
        subscripts such as x[1][2].
        """
        if not self._match_set((TokenType.L_BRACKET, TokenType.L_BRACE)):
            return this

        bracket_kind = self._prev.token_type
        expressions: t.List[t.Optional[exp.Expression]]

        if self._match(TokenType.COLON):
            # Leading-colon slice, e.g. x[:y].
            expressions = [self.expression(exp.Slice, expression=self._parse_conjunction())]
        else:
            expressions = self._parse_csv(lambda: self._parse_slice(self._parse_conjunction()))

        # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
        if bracket_kind == TokenType.L_BRACE:
            this = self.expression(exp.Struct, expressions=expressions)
        elif not this or this.name.upper() == "ARRAY":
            this = self.expression(exp.Array, expressions=expressions)
        else:
            # Normalize subscripts against the dialect's index base.
            expressions = apply_index_offset(expressions, -self.index_offset)
            this = self.expression(exp.Bracket, this=this, expressions=expressions)

        # The closing token must match the opener's kind.
        if not self._match(TokenType.R_BRACKET) and bracket_kind == TokenType.L_BRACKET:
            self.raise_error("Expected ]")
        elif not self._match(TokenType.R_BRACE) and bracket_kind == TokenType.L_BRACE:
            self.raise_error("Expected }")

        this.comments = self._prev_comments
        return self._parse_bracket(this)
+2944
+2945    def _parse_slice(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+2946        if self._match(TokenType.COLON):
+2947            return self.expression(exp.Slice, this=this, expression=self._parse_conjunction())
+2948        return this
+2949
+2950    def _parse_case(self) -> t.Optional[exp.Expression]:
+2951        ifs = []
+2952        default = None
+2953
+2954        expression = self._parse_conjunction()
+2955
+2956        while self._match(TokenType.WHEN):
+2957            this = self._parse_conjunction()
+2958            self._match(TokenType.THEN)
+2959            then = self._parse_conjunction()
+2960            ifs.append(self.expression(exp.If, this=this, true=then))
+2961
+2962        if self._match(TokenType.ELSE):
+2963            default = self._parse_conjunction()
+2964
+2965        if not self._match(TokenType.END):
+2966            self.raise_error("Expected END after CASE", self._prev)
+2967
+2968        return self._parse_window(
+2969            self.expression(exp.Case, this=expression, ifs=ifs, default=default)
+2970        )
+2971
+2972    def _parse_if(self) -> t.Optional[exp.Expression]:
+2973        if self._match(TokenType.L_PAREN):
+2974            args = self._parse_csv(self._parse_conjunction)
+2975            this = exp.If.from_arg_list(args)
+2976            self.validate_expression(this, args)
+2977            self._match_r_paren()
+2978        else:
+2979            condition = self._parse_conjunction()
+2980            self._match(TokenType.THEN)
+2981            true = self._parse_conjunction()
+2982            false = self._parse_conjunction() if self._match(TokenType.ELSE) else None
+2983            self._match(TokenType.END)
+2984            this = self.expression(exp.If, this=condition, true=true, false=false)
+2985
+2986        return self._parse_window(this)
+2987
+2988    def _parse_extract(self) -> exp.Expression:
+2989        this = self._parse_function() or self._parse_var() or self._parse_type()
+2990
+2991        if self._match(TokenType.FROM):
+2992            return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
+2993
+2994        if not self._match(TokenType.COMMA):
+2995            self.raise_error("Expected FROM or comma after EXTRACT", self._prev)
+2996
+2997        return self.expression(exp.Extract, this=this, expression=self._parse_bitwise())
+2998
+2999    def _parse_cast(self, strict: bool) -> exp.Expression:
+3000        this = self._parse_conjunction()
+3001
+3002        if not self._match(TokenType.ALIAS):
+3003            self.raise_error("Expected AS after CAST")
+3004
+3005        to = self._parse_types()
+3006
+3007        if not to:
+3008            self.raise_error("Expected TYPE after CAST")
+3009        elif to.this == exp.DataType.Type.CHAR:
+3010            if self._match(TokenType.CHARACTER_SET):
+3011                to = self.expression(exp.CharacterSet, this=self._parse_var_or_string())
+3012
+3013        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+3014
    def _parse_string_agg(self) -> exp.Expression:
        """Parse STRING_AGG / GROUP_CONCAT-style arguments into exp.GroupConcat.

        Handles the Postgres form STRING_AGG([DISTINCT] expr, sep [ORDER BY ...])
        as well as the WITHIN GROUP (ORDER BY ...) form.
        """
        expression: t.Optional[exp.Expression]

        if self._match(TokenType.DISTINCT):
            args = self._parse_csv(self._parse_conjunction)
            expression = self.expression(exp.Distinct, expressions=[seq_get(args, 0)])
        else:
            args = self._parse_csv(self._parse_conjunction)
            expression = seq_get(args, 0)

        # Remember the position so we can rewind if WITHIN GROUP doesn't follow.
        index = self._index
        if not self._match(TokenType.R_PAREN):
            # postgres: STRING_AGG([DISTINCT] expression, separator [ORDER BY expression1 {ASC | DESC} [, ...]])
            order = self._parse_order(this=expression)
            return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))

        # Checks if we can parse an order clause: WITHIN GROUP (ORDER BY <order_by_expression_list> [ASC | DESC]).
        # This is done "manually", instead of letting _parse_window parse it into an exp.WithinGroup node, so that
        # the STRING_AGG call is parsed like in MySQL / SQLite and can thus be transpiled more easily to them.
        if not self._match(TokenType.WITHIN_GROUP):
            self._retreat(index)
            this = exp.GroupConcat.from_arg_list(args)
            self.validate_expression(this, args)
            return this

        self._match_l_paren()  # The corresponding match_r_paren will be called in parse_function (caller)
        order = self._parse_order(this=expression)
        return self.expression(exp.GroupConcat, this=order, separator=seq_get(args, 1))
+3043
+3044    def _parse_convert(self, strict: bool) -> t.Optional[exp.Expression]:
+3045        to: t.Optional[exp.Expression]
+3046        this = self._parse_column()
+3047
+3048        if self._match(TokenType.USING):
+3049            to = self.expression(exp.CharacterSet, this=self._parse_var())
+3050        elif self._match(TokenType.COMMA):
+3051            to = self._parse_types()
+3052        else:
+3053            to = None
+3054
+3055        return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
+3056
+3057    def _parse_position(self, haystack_first: bool = False) -> exp.Expression:
+3058        args = self._parse_csv(self._parse_bitwise)
+3059
+3060        if self._match(TokenType.IN):
+3061            return self.expression(
+3062                exp.StrPosition, this=self._parse_bitwise(), substr=seq_get(args, 0)
+3063            )
+3064
+3065        if haystack_first:
+3066            haystack = seq_get(args, 0)
+3067            needle = seq_get(args, 1)
+3068        else:
+3069            needle = seq_get(args, 0)
+3070            haystack = seq_get(args, 1)
+3071
+3072        this = exp.StrPosition(this=haystack, substr=needle, position=seq_get(args, 2))
+3073
+3074        self.validate_expression(this, args)
+3075
+3076        return this
+3077
+3078    def _parse_join_hint(self, func_name: str) -> exp.Expression:
+3079        args = self._parse_csv(self._parse_table)
+3080        return exp.JoinHint(this=func_name.upper(), expressions=args)
+3081
+3082    def _parse_substring(self) -> exp.Expression:
+3083        # Postgres supports the form: substring(string [from int] [for int])
+3084        # https://www.postgresql.org/docs/9.1/functions-string.html @ Table 9-6
+3085
+3086        args = self._parse_csv(self._parse_bitwise)
+3087
+3088        if self._match(TokenType.FROM):
+3089            args.append(self._parse_bitwise())
+3090            if self._match(TokenType.FOR):
+3091                args.append(self._parse_bitwise())
+3092
+3093        this = exp.Substring.from_arg_list(args)
+3094        self.validate_expression(this, args)
+3095
+3096        return this
+3097
+3098    def _parse_trim(self) -> exp.Expression:
+3099        # https://www.w3resource.com/sql/character-functions/trim.php
+3100        # https://docs.oracle.com/javadb/10.8.3.0/ref/rreftrimfunc.html
+3101
+3102        position = None
+3103        collation = None
+3104
+3105        if self._match_set(self.TRIM_TYPES):
+3106            position = self._prev.text.upper()
+3107
+3108        expression = self._parse_term()
+3109        if self._match_set((TokenType.FROM, TokenType.COMMA)):
+3110            this = self._parse_term()
+3111        else:
+3112            this = expression
+3113            expression = None
+3114
+3115        if self._match(TokenType.COLLATE):
+3116            collation = self._parse_term()
+3117
+3118        return self.expression(
+3119            exp.Trim,
+3120            this=this,
+3121            position=position,
+3122            expression=expression,
+3123            collation=collation,
+3124        )
+3125
+3126    def _parse_window_clause(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+3127        return self._match(TokenType.WINDOW) and self._parse_csv(self._parse_named_window)
+3128
+3129    def _parse_named_window(self) -> t.Optional[exp.Expression]:
+3130        return self._parse_window(self._parse_id_var(), alias=True)
+3131
    def _parse_window(
        self, this: t.Optional[exp.Expression], alias: bool = False
    ) -> t.Optional[exp.Expression]:
        """Parse trailing window-related syntax after a function call.

        Wraps `this` in FILTER (...), WITHIN GROUP (...), IGNORE/RESPECT NULLS
        and/or an OVER (...) window, in that order, as each appears. With
        alias=True, parses a named window definition (<name> AS (<spec>))
        instead of requiring OVER.
        """
        if self._match(TokenType.FILTER):
            where = self._parse_wrapped(self._parse_where)
            this = self.expression(exp.Filter, this=this, expression=where)

        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
        if self._match(TokenType.WITHIN_GROUP):
            order = self._parse_wrapped(self._parse_order)
            this = self.expression(exp.WithinGroup, this=this, expression=order)

        # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
        # Some dialects choose to implement and some do not.
        # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html

        # There is some code above in _parse_lambda that handles
        #   SELECT FIRST_VALUE(TABLE.COLUMN IGNORE|RESPECT NULLS) OVER ...

        # The below changes handle
        #   SELECT FIRST_VALUE(TABLE.COLUMN) IGNORE|RESPECT NULLS OVER ...

        # Oracle allows both formats
        #   (https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/img_text/first_value.html)
        #   and Snowflake chose to do the same for familiarity
        #   https://docs.snowflake.com/en/sql-reference/functions/first_value.html#usage-notes
        if self._match(TokenType.IGNORE_NULLS):
            this = self.expression(exp.IgnoreNulls, this=this)
        elif self._match(TokenType.RESPECT_NULLS):
            this = self.expression(exp.RespectNulls, this=this)

        # bigquery select from window x AS (partition by ...)
        if alias:
            self._match(TokenType.ALIAS)
        elif not self._match(TokenType.OVER):
            return this

        if not self._match(TokenType.L_PAREN):
            # OVER <window name> — a reference to a named window, no parentheses.
            return self.expression(exp.Window, this=this, alias=self._parse_id_var(False))

        window_alias = self._parse_id_var(any_token=False, tokens=self.WINDOW_ALIAS_TOKENS)
        partition = self._parse_partition_by()
        order = self._parse_order()
        kind = self._match_set((TokenType.ROWS, TokenType.RANGE)) and self._prev.text

        if kind:
            # Frame clause: ROWS|RANGE [BETWEEN] <bound> [AND <bound>]
            self._match(TokenType.BETWEEN)
            start = self._parse_window_spec()
            self._match(TokenType.AND)
            end = self._parse_window_spec()

            spec = self.expression(
                exp.WindowSpec,
                kind=kind,
                start=start["value"],
                start_side=start["side"],
                end=end["value"],
                end_side=end["side"],
            )
        else:
            spec = None

        self._match_r_paren()

        return self.expression(
            exp.Window,
            this=this,
            partition_by=partition,
            order=order,
            spec=spec,
            alias=window_alias,
        )
+3205
    def _parse_window_spec(self) -> t.Dict[str, t.Optional[str | exp.Expression]]:
        """Parse one window-frame bound: [BETWEEN] <UNBOUNDED|CURRENT ROW|expr>
        [PRECEDING|FOLLOWING], returning its value and side."""
        self._match(TokenType.BETWEEN)

        return {
            # The bound is either a keyword (UNBOUNDED / CURRENT ROW), captured
            # as the matched token's text, or an arbitrary expression.
            "value": (
                self._match_set((TokenType.UNBOUNDED, TokenType.CURRENT_ROW)) and self._prev.text
            )
            or self._parse_bitwise(),
            "side": self._match_set((TokenType.PRECEDING, TokenType.FOLLOWING)) and self._prev.text,
        }
+3216
+3217    def _parse_alias(
+3218        self, this: t.Optional[exp.Expression], explicit: bool = False
+3219    ) -> t.Optional[exp.Expression]:
+3220        any_token = self._match(TokenType.ALIAS)
+3221
+3222        if explicit and not any_token:
+3223            return this
+3224
+3225        if self._match(TokenType.L_PAREN):
+3226            aliases = self.expression(
+3227                exp.Aliases,
+3228                this=this,
+3229                expressions=self._parse_csv(lambda: self._parse_id_var(any_token)),
+3230            )
+3231            self._match_r_paren(aliases)
+3232            return aliases
+3233
+3234        alias = self._parse_id_var(any_token)
+3235
+3236        if alias:
+3237            return self.expression(exp.Alias, this=this, alias=alias)
+3238
+3239        return this
+3240
+3241    def _parse_id_var(
+3242        self,
+3243        any_token: bool = True,
+3244        tokens: t.Optional[t.Collection[TokenType]] = None,
+3245        prefix_tokens: t.Optional[t.Collection[TokenType]] = None,
+3246    ) -> t.Optional[exp.Expression]:
+3247        identifier = self._parse_identifier()
+3248
+3249        if identifier:
+3250            return identifier
+3251
+3252        prefix = ""
+3253
+3254        if prefix_tokens:
+3255            while self._match_set(prefix_tokens):
+3256                prefix += self._prev.text
+3257
+3258        if (any_token and self._advance_any()) or self._match_set(tokens or self.ID_VAR_TOKENS):
+3259            quoted = self._prev.token_type == TokenType.STRING
+3260            return exp.Identifier(this=prefix + self._prev.text, quoted=quoted)
+3261
+3262        return None
+3263
+3264    def _parse_string(self) -> t.Optional[exp.Expression]:
+3265        if self._match(TokenType.STRING):
+3266            return self.PRIMARY_PARSERS[TokenType.STRING](self, self._prev)
+3267        return self._parse_placeholder()
+3268
+3269    def _parse_number(self) -> t.Optional[exp.Expression]:
+3270        if self._match(TokenType.NUMBER):
+3271            return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev)
+3272        return self._parse_placeholder()
+3273
+3274    def _parse_identifier(self) -> t.Optional[exp.Expression]:
+3275        if self._match(TokenType.IDENTIFIER):
+3276            return self.expression(exp.Identifier, this=self._prev.text, quoted=True)
+3277        return self._parse_placeholder()
+3278
+3279    def _parse_var(self, any_token: bool = False) -> t.Optional[exp.Expression]:
+3280        if (any_token and self._advance_any()) or self._match(TokenType.VAR):
+3281            return self.expression(exp.Var, this=self._prev.text)
+3282        return self._parse_placeholder()
+3283
+3284    def _advance_any(self) -> t.Optional[Token]:
+3285        if self._curr and self._curr.token_type not in self.RESERVED_KEYWORDS:
+3286            self._advance()
+3287            return self._prev
+3288        return None
+3289
+3290    def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
+3291        return self._parse_var() or self._parse_string()
+3292
+3293    def _parse_null(self) -> t.Optional[exp.Expression]:
+3294        if self._match(TokenType.NULL):
+3295            return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
+3296        return None
+3297
+3298    def _parse_boolean(self) -> t.Optional[exp.Expression]:
+3299        if self._match(TokenType.TRUE):
+3300            return self.PRIMARY_PARSERS[TokenType.TRUE](self, self._prev)
+3301        if self._match(TokenType.FALSE):
+3302            return self.PRIMARY_PARSERS[TokenType.FALSE](self, self._prev)
+3303        return None
+3304
+3305    def _parse_star(self) -> t.Optional[exp.Expression]:
+3306        if self._match(TokenType.STAR):
+3307            return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
+3308        return None
+3309
+3310    def _parse_placeholder(self) -> t.Optional[exp.Expression]:
+3311        if self._match_set(self.PLACEHOLDER_PARSERS):
+3312            placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
+3313            if placeholder:
+3314                return placeholder
+3315            self._advance(-1)
+3316        return None
+3317
+3318    def _parse_except(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+3319        if not self._match(TokenType.EXCEPT):
+3320            return None
+3321        if self._match(TokenType.L_PAREN, advance=False):
+3322            return self._parse_wrapped_csv(self._parse_column)
+3323        return self._parse_csv(self._parse_column)
+3324
+3325    def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
+3326        if not self._match(TokenType.REPLACE):
+3327            return None
+3328        if self._match(TokenType.L_PAREN, advance=False):
+3329            return self._parse_wrapped_csv(self._parse_expression)
+3330        return self._parse_csv(self._parse_expression)
+3331
    def _parse_csv(
        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
    ) -> t.List[t.Optional[exp.Expression]]:
        """Parse a `sep`-separated list of items using `parse_method`.

        Comments preceding a separator are attached to the item parsed just
        before it; None results (after the first item) are dropped.
        """
        parse_result = parse_method()
        items = [parse_result] if parse_result is not None else []

        while self._match(sep):
            if parse_result and self._prev_comments:
                parse_result.comments = self._prev_comments

            parse_result = parse_method()
            if parse_result is not None:
                items.append(parse_result)

        return items
+3347
+3348    def _parse_tokens(
+3349        self, parse_method: t.Callable, expressions: t.Dict
+3350    ) -> t.Optional[exp.Expression]:
+3351        this = parse_method()
+3352
+3353        while self._match_set(expressions):
+3354            this = self.expression(
+3355                expressions[self._prev.token_type],
+3356                this=this,
+3357                comments=self._prev_comments,
+3358                expression=parse_method(),
+3359            )
+3360
+3361        return this
+3362
+3363    def _parse_wrapped_id_vars(self) -> t.List[t.Optional[exp.Expression]]:
+3364        return self._parse_wrapped_csv(self._parse_id_var)
+3365
+3366    def _parse_wrapped_csv(
+3367        self, parse_method: t.Callable, sep: TokenType = TokenType.COMMA
+3368    ) -> t.List[t.Optional[exp.Expression]]:
+3369        return self._parse_wrapped(lambda: self._parse_csv(parse_method, sep=sep))
+3370
+3371    def _parse_wrapped(self, parse_method: t.Callable) -> t.Any:
+3372        self._match_l_paren()
+3373        parse_result = parse_method()
+3374        self._match_r_paren()
+3375        return parse_result
+3376
+3377    def _parse_select_or_expression(self) -> t.Optional[exp.Expression]:
+3378        return self._parse_select() or self._parse_expression()
+3379
+3380    def _parse_ddl_select(self) -> t.Optional[exp.Expression]:
+3381        return self._parse_set_operations(
+3382            self._parse_select(nested=True, parse_subquery_alias=False)
+3383        )
+3384
+3385    def _parse_transaction(self) -> exp.Expression:
+3386        this = None
+3387        if self._match_texts(self.TRANSACTION_KIND):
+3388            this = self._prev.text
+3389
+3390        self._match_texts({"TRANSACTION", "WORK"})
+3391
+3392        modes = []
+3393        while True:
+3394            mode = []
+3395            while self._match(TokenType.VAR):
+3396                mode.append(self._prev.text)
+3397
+3398            if mode:
+3399                modes.append(" ".join(mode))
+3400            if not self._match(TokenType.COMMA):
+3401                break
+3402
+3403        return self.expression(exp.Transaction, this=this, modes=modes)
+3404
    def _parse_commit_or_rollback(self) -> exp.Expression:
        """Parse COMMIT/ROLLBACK [TRANSACTION|WORK] [TO [SAVEPOINT] name]
        [AND [NO] CHAIN].

        Note: the savepoint is only attached to Rollback and the chain flag
        only to Commit, so a parsed ROLLBACK ... AND CHAIN drops `chain`.
        """
        chain = None
        savepoint = None
        # The COMMIT/ROLLBACK keyword was consumed by the caller; check which.
        is_rollback = self._prev.token_type == TokenType.ROLLBACK

        self._match_texts({"TRANSACTION", "WORK"})

        if self._match_text_seq("TO"):
            self._match_text_seq("SAVEPOINT")
            savepoint = self._parse_id_var()

        if self._match(TokenType.AND):
            # AND CHAIN -> True, AND NO CHAIN -> False
            chain = not self._match_text_seq("NO")
            self._match_text_seq("CHAIN")

        if is_rollback:
            return self.expression(exp.Rollback, savepoint=savepoint)
        return self.expression(exp.Commit, chain=chain)
+3423
+3424    def _parse_add_column(self) -> t.Optional[exp.Expression]:
+3425        if not self._match_text_seq("ADD"):
+3426            return None
+3427
+3428        self._match(TokenType.COLUMN)
+3429        exists_column = self._parse_exists(not_=True)
+3430        expression = self._parse_column_def(self._parse_field(any_token=True))
+3431
+3432        if expression:
+3433            expression.set("exists", exists_column)
+3434
+3435        return expression
+3436
+3437    def _parse_drop_column(self) -> t.Optional[exp.Expression]:
+3438        return self._match(TokenType.DROP) and self._parse_drop(default_kind="COLUMN")
+3439
+3440    # https://docs.aws.amazon.com/athena/latest/ug/alter-table-drop-partition.html
+3441    def _parse_drop_partition(self, exists: t.Optional[bool] = None) -> exp.Expression:
+3442        return self.expression(
+3443            exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
+3444        )
+3445
+3446    def _parse_add_constraint(self) -> t.Optional[exp.Expression]:
+3447        this = None
+3448        kind = self._prev.token_type
+3449
+3450        if kind == TokenType.CONSTRAINT:
+3451            this = self._parse_id_var()
+3452
+3453            if self._match(TokenType.CHECK):
+3454                expression = self._parse_wrapped(self._parse_conjunction)
+3455                enforced = self._match_text_seq("ENFORCED")
+3456
+3457                return self.expression(
+3458                    exp.AddConstraint, this=this, expression=expression, enforced=enforced
+3459                )
+3460
+3461        if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY):
+3462            expression = self._parse_foreign_key()
+3463        elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY):
+3464            expression = self._parse_primary_key()
+3465
+3466        return self.expression(exp.AddConstraint, this=this, expression=expression)
+3467
    def _parse_alter(self) -> t.Optional[exp.Expression]:
        """Parse ALTER TABLE and its actions (DELETE, ADD, DROP, RENAME TO,
        ALTER COLUMN); any other ALTER target is kept as a raw Command."""
        if not self._match(TokenType.TABLE):
            return self._parse_as_command(self._prev)

        exists = self._parse_exists()
        this = self._parse_table(schema=True)

        actions: t.Optional[exp.Expression | t.List[t.Optional[exp.Expression]]] = None

        index = self._index
        if self._match(TokenType.DELETE):
            actions = [self.expression(exp.Delete, where=self._parse_where())]
        elif self._match_text_seq("ADD"):
            if self._match_set(self.ADD_CONSTRAINT_TOKENS):
                actions = self._parse_csv(self._parse_add_constraint)
            else:
                # Not a constraint: rewind so _parse_add_column re-reads "ADD".
                self._retreat(index)
                actions = self._parse_csv(self._parse_add_column)
        elif self._match_text_seq("DROP"):
            partition_exists = self._parse_exists()

            if self._match(TokenType.PARTITION, advance=False):
                actions = self._parse_csv(
                    lambda: self._parse_drop_partition(exists=partition_exists)
                )
            else:
                # Not a partition drop: rewind so _parse_drop_column re-reads "DROP".
                self._retreat(index)
                actions = self._parse_csv(self._parse_drop_column)
        elif self._match_text_seq("RENAME", "TO"):
            actions = self.expression(exp.RenameTable, this=self._parse_table(schema=True))
        elif self._match_text_seq("ALTER"):
            self._match(TokenType.COLUMN)
            column = self._parse_field(any_token=True)

            if self._match_pair(TokenType.DROP, TokenType.DEFAULT):
                actions = self.expression(exp.AlterColumn, this=column, drop=True)
            elif self._match_pair(TokenType.SET, TokenType.DEFAULT):
                actions = self.expression(
                    exp.AlterColumn, this=column, default=self._parse_conjunction()
                )
            else:
                # ALTER COLUMN ... [SET DATA] [TYPE <type>] [COLLATE ...] [USING ...]
                self._match_text_seq("SET", "DATA")
                actions = self.expression(
                    exp.AlterColumn,
                    this=column,
                    dtype=self._match_text_seq("TYPE") and self._parse_types(),
                    collate=self._match(TokenType.COLLATE) and self._parse_term(),
                    using=self._match(TokenType.USING) and self._parse_conjunction(),
                )

        actions = ensure_list(actions)
        return self.expression(exp.AlterTable, this=this, exists=exists, actions=actions)
+3520
+3521    def _parse_show(self) -> t.Optional[exp.Expression]:
+3522        parser = self._find_parser(self.SHOW_PARSERS, self._show_trie)  # type: ignore
+3523        if parser:
+3524            return parser(self)
+3525        self._advance()
+3526        return self.expression(exp.Show, this=self._prev.text.upper())
+3527
+3528    def _default_parse_set_item(self) -> exp.Expression:
+3529        return self.expression(
+3530            exp.SetItem,
+3531            this=self._parse_statement(),
+3532        )
+3533
+3534    def _parse_set_item(self) -> t.Optional[exp.Expression]:
+3535        parser = self._find_parser(self.SET_PARSERS, self._set_trie)  # type: ignore
+3536        return parser(self) if parser else self._default_parse_set_item()
+3537
+3538    def _parse_merge(self) -> exp.Expression:
+3539        self._match(TokenType.INTO)
+3540        target = self._parse_table()
+3541
+3542        self._match(TokenType.USING)
+3543        using = self._parse_table()
+3544
+3545        self._match(TokenType.ON)
+3546        on = self._parse_conjunction()
+3547
+3548        whens = []
+3549        while self._match(TokenType.WHEN):
+3550            this = self._parse_conjunction()
+3551            self._match(TokenType.THEN)
+3552
+3553            if self._match(TokenType.INSERT):
+3554                _this = self._parse_star()
+3555                if _this:
+3556                    then = self.expression(exp.Insert, this=_this)
+3557                else:
+3558                    then = self.expression(
+3559                        exp.Insert,
+3560                        this=self._parse_value(),
+3561                        expression=self._match(TokenType.VALUES) and self._parse_value(),
+3562                    )
+3563            elif self._match(TokenType.UPDATE):
+3564                expressions = self._parse_star()
+3565                if expressions:
+3566                    then = self.expression(exp.Update, expressions=expressions)
+3567                else:
+3568                    then = self.expression(
+3569                        exp.Update,
+3570                        expressions=self._match(TokenType.SET)
+3571                        and self._parse_csv(self._parse_equality),
+3572                    )
+3573            elif self._match(TokenType.DELETE):
+3574                then = self.expression(exp.Var, this=self._prev.text)
+3575
+3576            whens.append(self.expression(exp.When, this=this, then=then))
+3577
+3578        return self.expression(
+3579            exp.Merge,
+3580            this=target,
+3581            using=using,
+3582            on=on,
+3583            expressions=whens,
+3584        )
+3585
+3586    def _parse_set(self) -> exp.Expression:
+3587        return self.expression(exp.Set, expressions=self._parse_csv(self._parse_set_item))
+3588
+3589    def _parse_as_command(self, start: Token) -> exp.Command:
+3590        while self._curr:
+3591            self._advance()
+3592        return exp.Command(this=self._find_sql(start, self._prev))
+3593
    def _find_parser(
        self, parsers: t.Dict[str, t.Callable], trie: t.Dict
    ) -> t.Optional[t.Callable]:
        """Look up a (possibly multi-word) parser keyed by upcoming token texts.

        Walks `trie` with the upper-cased text of successive tokens and returns
        the matching entry of `parsers`; rewinds and returns None when the
        token stream diverges from every key.
        """
        index = self._index
        this = []
        while True:
            # The current token might be multiple words
            curr = self._curr.text.upper()
            key = curr.split(" ")
            this.append(curr)
            self._advance()
            result, trie = in_trie(trie, key)
            if result == 0:
                # No key starts with the consumed words — give up and rewind.
                break
            if result == 2:
                # Exact key match: the consumed words name a registered parser.
                subparser = parsers[" ".join(this)]
                return subparser
        self._retreat(index)
        return None
+3613
+3614    def _match(self, token_type, advance=True):
+3615        if not self._curr:
+3616            return None
+3617
+3618        if self._curr.token_type == token_type:
+3619            if advance:
+3620                self._advance()
+3621            return True
+3622
+3623        return None
+3624
+3625    def _match_set(self, types):
+3626        if not self._curr:
+3627            return None
+3628
+3629        if self._curr.token_type in types:
+3630            self._advance()
+3631            return True
+3632
+3633        return None
+3634
+3635    def _match_pair(self, token_type_a, token_type_b, advance=True):
+3636        if not self._curr or not self._next:
+3637            return None
+3638
+3639        if self._curr.token_type == token_type_a and self._next.token_type == token_type_b:
+3640            if advance:
+3641                self._advance(2)
+3642            return True
+3643
+3644        return None
+3645
+3646    def _match_l_paren(self, expression=None):
+3647        if not self._match(TokenType.L_PAREN):
+3648            self.raise_error("Expecting (")
+3649        if expression and self._prev_comments:
+3650            expression.comments = self._prev_comments
+3651
+3652    def _match_r_paren(self, expression=None):
+3653        if not self._match(TokenType.R_PAREN):
+3654            self.raise_error("Expecting )")
+3655        if expression and self._prev_comments:
+3656            expression.comments = self._prev_comments
+3657
+3658    def _match_texts(self, texts):
+3659        if self._curr and self._curr.text.upper() in texts:
+3660            self._advance()
+3661            return True
+3662        return False
+3663
+3664    def _match_text_seq(self, *texts, advance=True):
+3665        index = self._index
+3666        for text in texts:
+3667            if self._curr and self._curr.text.upper() == text:
+3668                self._advance()
+3669            else:
+3670                self._retreat(index)
+3671                return False
+3672
+3673        if not advance:
+3674            self._retreat(index)
+3675
+3676        return True
+3677
    def _replace_columns_with_dots(self, this):
        """Rewrite Column/Identifier nodes into Dot/Var path expressions.

        Recurses into Dot and Column children first, then rewrites the current
        node: a column with a table qualifier becomes Dot(table, column), a
        bare column or identifier becomes a Var carrying just its name.
        """
        if isinstance(this, exp.Dot):
            exp.replace_children(this, self._replace_columns_with_dots)
        elif isinstance(this, exp.Column):
            exp.replace_children(this, self._replace_columns_with_dots)
            table = this.args.get("table")
            this = (
                self.expression(exp.Dot, this=table, expression=this.this)
                if table
                else self.expression(exp.Var, this=this.name)
            )
        elif isinstance(this, exp.Identifier):
            this = self.expression(exp.Var, this=this.name)
        return this
+3692
+3693    def _replace_lambda(self, node, lambda_variables):
+3694        if isinstance(node, exp.Column):
+3695            if node.name in lambda_variables:
+3696                return node.this
+3697        return node
+
+ + +

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces +a parsed syntax tree.

+ +
Arguments:
+ +
    +
  • error_level: the desired error level. +Default: ErrorLevel.IMMEDIATE
  • +
  • error_message_context: determines the amount of context to capture from a +query string when displaying the error message (in number of characters). +Default: 50.
  • +
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. +Default: 0
  • +
  • alias_post_tablesample: If the table alias comes after tablesample. +Default: False
  • +
  • max_errors: Maximum number of error messages to include in a raised ParseError. +This is only relevant if error_level is ErrorLevel.RAISE. +Default: 3
  • +
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. +Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". +Default: "nulls_are_small"
  • +
+
+ + +
+ +
+ + Parser( error_level: Optional[sqlglot.errors.ErrorLevel] = None, error_message_context: int = 100, index_offset: int = 0, unnest_column_only: bool = False, alias_post_tablesample: bool = False, max_errors: int = 3, null_ordering: Optional[str] = None) + + + +
+ +
    def __init__(
        self,
        error_level: t.Optional[ErrorLevel] = None,
        error_message_context: int = 100,
        index_offset: int = 0,
        unnest_column_only: bool = False,
        alias_post_tablesample: bool = False,
        max_errors: int = 3,
        null_ordering: t.Optional[str] = None,
    ):
        """Configure the parser and initialize its mutable per-parse state."""
        # When no explicit level is given, errors are raised immediately.
        self.error_level = error_level or ErrorLevel.IMMEDIATE
        self.error_message_context = error_message_context
        self.index_offset = index_offset
        self.unnest_column_only = unnest_column_only
        self.alias_post_tablesample = alias_post_tablesample
        self.max_errors = max_errors
        self.null_ordering = null_ordering
        # Set up the per-parse state (sql text, errors, token cursor).
        self.reset()
+
+ + + + +
+
+ +
+ + def + reset(self): + + + +
+ +
    def reset(self):
        """Clear all per-parse state so this parser instance can be reused."""
        self.sql = ""
        self.errors = []
        self._tokens = []
        self._index = 0
        # Token cursor: current token, lookahead, and previously consumed token.
        self._curr = None
        self._next = None
        self._prev = None
        self._prev_comments = None
+
+ + + + +
+
+ +
+ + def + parse( self, raw_tokens: List[sqlglot.tokens.Token], sql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]: + + + +
+ +
696    def parse(
+697        self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
+698    ) -> t.List[t.Optional[exp.Expression]]:
+699        """
+700        Parses a list of tokens and returns a list of syntax trees, one tree
+701        per parsed SQL statement.
+702
+703        Args:
+704            raw_tokens: the list of tokens.
+705            sql: the original SQL string, used to produce helpful debug messages.
+706
+707        Returns:
+708            The list of syntax trees.
+709        """
+710        return self._parse(
+711            parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
+712        )
+
+ + +

Parses a list of tokens and returns a list of syntax trees, one tree +per parsed SQL statement.

+ +
Arguments:
+ +
    +
  • raw_tokens: the list of tokens.
  • +
  • sql: the original SQL string, used to produce helpful debug messages.
  • +
+ +
Returns:
+ +
+

The list of syntax trees.

+
+
+ + +
+
+ +
+ + def + parse_into( self, expression_types: Union[str, Type[sqlglot.expressions.Expression], Collection[Union[str, Type[sqlglot.expressions.Expression]]]], raw_tokens: List[sqlglot.tokens.Token], sql: Optional[str] = None) -> List[Optional[sqlglot.expressions.Expression]]: + + + +
+ +
714    def parse_into(
+715        self,
+716        expression_types: exp.IntoType,
+717        raw_tokens: t.List[Token],
+718        sql: t.Optional[str] = None,
+719    ) -> t.List[t.Optional[exp.Expression]]:
+720        """
+721        Parses a list of tokens into a given Expression type. If a collection of Expression
+722        types is given instead, this method will try to parse the token list into each one
+723        of them, stopping at the first for which the parsing succeeds.
+724
+725        Args:
+726            expression_types: the expression type(s) to try and parse the token list into.
+727            raw_tokens: the list of tokens.
+728            sql: the original SQL string, used to produce helpful debug messages.
+729
+730        Returns:
+731            The target Expression.
+732        """
+733        errors = []
+734        for expression_type in ensure_collection(expression_types):
+735            parser = self.EXPRESSION_PARSERS.get(expression_type)
+736            if not parser:
+737                raise TypeError(f"No parser registered for {expression_type}")
+738            try:
+739                return self._parse(parser, raw_tokens, sql)
+740            except ParseError as e:
+741                e.errors[0]["into_expression"] = expression_type
+742                errors.append(e)
+743        raise ParseError(
+744            f"Failed to parse into {expression_types}",
+745            errors=merge_errors(errors),
+746        ) from errors[-1]
+
+ + +

Parses a list of tokens into a given Expression type. If a collection of Expression +types is given instead, this method will try to parse the token list into each one +of them, stopping at the first for which the parsing succeeds.

+ +
Arguments:
+ +
    +
  • expression_types: the expression type(s) to try and parse the token list into.
  • +
  • raw_tokens: the list of tokens.
  • +
  • sql: the original SQL string, used to produce helpful debug messages.
  • +
+ +
Returns:
+ +
+

The target Expression.

+
+
+ + +
+
+ +
+ + def + check_errors(self) -> None: + + + +
+ +
782    def check_errors(self) -> None:
+783        """
+784        Logs or raises any found errors, depending on the chosen error level setting.
+785        """
+786        if self.error_level == ErrorLevel.WARN:
+787            for error in self.errors:
+788                logger.error(str(error))
+789        elif self.error_level == ErrorLevel.RAISE and self.errors:
+790            raise ParseError(
+791                concat_messages(self.errors, self.max_errors),
+792                errors=merge_errors(self.errors),
+793            )
+
+ + +

Logs or raises any found errors, depending on the chosen error level setting.

+
+ + +
+
+ +
+ + def + raise_error(self, message: str, token: Optional[sqlglot.tokens.Token] = None) -> None: + + + +
+ +
795    def raise_error(self, message: str, token: t.Optional[Token] = None) -> None:
+796        """
+797        Appends an error in the list of recorded errors or raises it, depending on the chosen
+798        error level setting.
+799        """
+800        token = token or self._curr or self._prev or Token.string("")
+801        start = self._find_token(token)
+802        end = start + len(token.text)
+803        start_context = self.sql[max(start - self.error_message_context, 0) : start]
+804        highlight = self.sql[start:end]
+805        end_context = self.sql[end : end + self.error_message_context]
+806
+807        error = ParseError.new(
+808            f"{message}. Line {token.line}, Col: {token.col}.\n"
+809            f"  {start_context}\033[4m{highlight}\033[0m{end_context}",
+810            description=message,
+811            line=token.line,
+812            col=token.col,
+813            start_context=start_context,
+814            highlight=highlight,
+815            end_context=end_context,
+816        )
+817
+818        if self.error_level == ErrorLevel.IMMEDIATE:
+819            raise error
+820
+821        self.errors.append(error)
+
+ + +

Appends an error in the list of recorded errors or raises it, depending on the chosen +error level setting.

+
+ + +
+
+ +
+ + def + expression( self, exp_class: Type[sqlglot.expressions.Expression], comments: Optional[List[str]] = None, **kwargs) -> sqlglot.expressions.Expression: + + + +
+ +
823    def expression(
+824        self, exp_class: t.Type[exp.Expression], comments: t.Optional[t.List[str]] = None, **kwargs
+825    ) -> exp.Expression:
+826        """
+827        Creates a new, validated Expression.
+828
+829        Args:
+830            exp_class: the expression class to instantiate.
+831            comments: an optional list of comments to attach to the expression.
+832            kwargs: the arguments to set for the expression along with their respective values.
+833
+834        Returns:
+835            The target expression.
+836        """
+837        instance = exp_class(**kwargs)
+838        if self._prev_comments:
+839            instance.comments = self._prev_comments
+840            self._prev_comments = None
+841        if comments:
+842            instance.comments = comments
+843        self.validate_expression(instance)
+844        return instance
+
+ + +

Creates a new, validated Expression.

+ +
Arguments:
+ +
    +
  • exp_class: the expression class to instantiate.
  • +
  • comments: an optional list of comments to attach to the expression.
  • +
  • kwargs: the arguments to set for the expression along with their respective values.
  • +
+ +
Returns:
+ +
+

The target expression.

+
+
+ + +
+
+ +
+ + def + validate_expression( self, expression: sqlglot.expressions.Expression, args: Optional[List] = None) -> None: + + + +
+ +
846    def validate_expression(
+847        self, expression: exp.Expression, args: t.Optional[t.List] = None
+848    ) -> None:
+849        """
+850        Validates an already instantiated expression, making sure that all its mandatory arguments
+851        are set.
+852
+853        Args:
+854            expression: the expression to validate.
+855            args: an optional list of items that was used to instantiate the expression, if it's a Func.
+856        """
+857        if self.error_level == ErrorLevel.IGNORE:
+858            return
+859
+860        for error_message in expression.error_messages(args):
+861            self.raise_error(error_message)
+
+ + +

Validates an already instantiated expression, making sure that all its mandatory arguments +are set.

+ +
Arguments:
+ +
    +
  • expression: the expression to validate.
  • +
  • args: an optional list of items that was used to instantiate the expression, if it's a Func.
  • +
+
+ + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/planner.html b/docs/sqlglot/planner.html new file mode 100644 index 0000000..7df7e4e --- /dev/null +++ b/docs/sqlglot/planner.html @@ -0,0 +1,1995 @@ + + + + + + + sqlglot.planner API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.planner

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import itertools
+  4import math
+  5import typing as t
+  6
+  7from sqlglot import alias, exp
+  8from sqlglot.errors import UnsupportedError
+  9from sqlglot.optimizer.eliminate_joins import join_condition
+ 10
+ 11
class Plan:
    """Wraps a SQL expression together with the executable Step DAG derived from it.

    The expression is copied on construction so that later mutations of the
    original do not affect the plan.
    """

    def __init__(self, expression: exp.Expression) -> None:
        self.expression = expression.copy()
        self.root = Step.from_expression(self.expression)
        # Lazily-built adjacency map: step -> set of its direct dependencies.
        self._dag: t.Dict[Step, t.Set[Step]] = {}

    @property
    def dag(self) -> t.Dict[Step, t.Set[Step]]:
        """Adjacency map of the step DAG, built on first access and cached."""
        if not self._dag:
            dag: t.Dict[Step, t.Set[Step]] = {}
            nodes = {self.root}

            while nodes:
                node = nodes.pop()
                dag[node] = set()
                for dep in node.dependencies:
                    dag[node].add(dep)
                    # Only queue steps that haven't been expanded yet; shared
                    # dependencies would otherwise be re-expanded repeatedly
                    # (and a cyclic graph would loop forever).
                    if dep not in dag:
                        nodes.add(dep)
            self._dag = dag

        return self._dag

    @property
    def leaves(self) -> t.Iterator[Step]:
        """Steps with no dependencies, i.e. the ones that can execute first."""
        return (node for node, deps in self.dag.items() if not deps)

    def __repr__(self) -> str:
        return f"Plan\n----\n{repr(self.root)}"
+ 40
+ 41
class Step:
    """Base node of the execution DAG: one unit of work (scan, join, aggregate, ...)."""

    @classmethod
    def from_expression(
        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
    ) -> Step:
        """
        Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.
        Note: the expression's tables and subqueries must be aliased for this method to work. For
        example, given the following expression:

        SELECT
          x.a,
          SUM(x.b)
        FROM x AS x
        JOIN y AS y
          ON x.a = y.a
        GROUP BY x.a

        the following DAG is produced (the expression IDs might differ per execution):

        - Aggregate: x (4347984624)
            Context:
              Aggregations:
                - SUM(x.b)
              Group:
                - x.a
            Projections:
              - x.a
              - "x".""
            Dependencies:
            - Join: x (4347985296)
              Context:
                y:
                On: x.a = y.a
              Projections:
              Dependencies:
              - Scan: x (4347983136)
                Context:
                  Source: x AS x
                Projections:
              - Scan: y (4343416624)
                Context:
                  Source: y AS y
                Projections:

        Args:
            expression: the expression to build the DAG from.
            ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.

        Returns:
            A Step DAG corresponding to `expression`.
        """
        ctes = ctes or {}
        with_ = expression.args.get("with")

        # CTEs break the mold of scope and introduce themselves to all in the context.
        if with_:
            ctes = ctes.copy()
            for cte in with_.expressions:
                step = Step.from_expression(cte.this, ctes)
                step.name = cte.alias
                ctes[step.name] = step  # type: ignore

        from_ = expression.args.get("from")

        # Pick the root of the chain: a scan of the FROM source, a set
        # operation, or a source-less static scan.
        if isinstance(expression, exp.Select) and from_:
            from_ = from_.expressions
            if len(from_) > 1:
                raise UnsupportedError(
                    "Multi-from statements are unsupported. Run it through the optimizer"
                )

            step = Scan.from_expression(from_[0], ctes)
        elif isinstance(expression, exp.Union):
            step = SetOperation.from_expression(expression, ctes)
        else:
            step = Scan()

        joins = expression.args.get("joins")

        if joins:
            # Joins wrap the current step: the scan becomes a dependency.
            join = Join.from_joins(joins, ctes)
            join.name = step.name
            join.add_dependency(step)
            step = join

        projections = []  # final selects in this chain of steps representing a select
        operands = {}  # intermediate computations of agg funcs eg x + 1 in SUM(x + 1)
        aggregations = []
        sequence = itertools.count()

        def extract_agg_operands(expression):
            # Pull non-column operands out of aggregate calls so they can be
            # computed once and referenced via a generated "_a_N" alias.
            for agg in expression.find_all(exp.AggFunc):
                for operand in agg.unnest_operands():
                    if isinstance(operand, exp.Column):
                        continue
                    if operand not in operands:
                        operands[operand] = f"_a_{next(sequence)}"
                    operand.replace(exp.column(operands[operand], quoted=True))

        for e in expression.expressions:
            if e.find(exp.AggFunc):
                # Aggregated selects are computed by an Aggregate step; the
                # projection just references them by alias.
                projections.append(exp.column(e.alias_or_name, step.name, quoted=True))
                aggregations.append(e)
                extract_agg_operands(e)
            else:
                projections.append(e)

        where = expression.args.get("where")

        if where:
            step.condition = where.this

        group = expression.args.get("group")

        if group or aggregations:
            aggregate = Aggregate()
            aggregate.source = step.name
            aggregate.name = step.name

            having = expression.args.get("having")

            if having:
                extract_agg_operands(having)
                aggregate.condition = having.this

            aggregate.operands = tuple(
                alias(operand, alias_) for operand, alias_ in operands.items()
            )
            aggregate.aggregations = aggregations
            # give aggregates names and replace projections with references to them
            aggregate.group = {
                f"_g{i}": e for i, e in enumerate(group.expressions if group else [])
            }
            for projection in projections:
                for i, e in aggregate.group.items():
                    for child, _, _ in projection.walk():
                        if child == e:
                            child.replace(exp.column(i, step.name))
            aggregate.add_dependency(step)
            step = aggregate

        order = expression.args.get("order")

        if order:
            sort = Sort()
            sort.name = step.name
            sort.key = order.expressions
            sort.add_dependency(step)
            step = sort

        step.projections = projections

        if isinstance(expression, exp.Select) and expression.args.get("distinct"):
            # DISTINCT is implemented as a group-by over all projections.
            distinct = Aggregate()
            distinct.source = step.name
            distinct.name = step.name
            distinct.group = {
                e.alias_or_name: exp.column(col=e.alias_or_name, table=step.name)
                for e in projections or expression.expressions
            }
            distinct.add_dependency(step)
            step = distinct

        limit = expression.args.get("limit")

        if limit:
            step.limit = int(limit.text("expression"))

        return step

    def __init__(self) -> None:
        # Name of the relation this step produces (usually the source alias).
        self.name: t.Optional[str] = None
        self.dependencies: t.Set[Step] = set()
        self.dependents: t.Set[Step] = set()
        self.projections: t.Sequence[exp.Expression] = []
        # math.inf means "no limit".
        self.limit: float = math.inf
        self.condition: t.Optional[exp.Expression] = None

    def add_dependency(self, dependency: Step) -> None:
        """Link `dependency` as an input of this step and back-link the dependent."""
        self.dependencies.add(dependency)
        dependency.dependents.add(self)

    def __repr__(self) -> str:
        return self.to_s()

    def to_s(self, level: int = 0) -> str:
        """Render this step and its dependencies, recursively, as an indented tree."""
        indent = "  " * level
        nested = f"{indent}    "

        # Subclass-specific context lines (source, joins, aggregations, ...).
        context = self._to_s(f"{nested}  ")

        if context:
            context = [f"{nested}Context:"] + context

        lines = [
            f"{indent}- {self.id}",
            *context,
            f"{nested}Projections:",
        ]

        for expression in self.projections:
            lines.append(f"{nested}  - {expression.sql()}")

        if self.condition:
            lines.append(f"{nested}Condition: {self.condition.sql()}")

        if self.limit is not math.inf:
            lines.append(f"{nested}Limit: {self.limit}")

        if self.dependencies:
            lines.append(f"{nested}Dependencies:")
            for dependency in self.dependencies:
                lines.append("  " + dependency.to_s(level + 1))

        return "\n".join(lines)

    @property
    def type_name(self) -> str:
        return self.__class__.__name__

    @property
    def id(self) -> str:
        # Human-readable identifier: "<Type>: <name> (<object id>)".
        name = self.name
        name = f" {name}" if name else ""
        return f"{self.type_name}:{name} ({id(self)})"

    def _to_s(self, _indent: str) -> t.List[str]:
        # Hook for subclasses to contribute extra "Context" lines.
        return []
+272
class Scan(Step):
    """Leaf step that reads rows from a table, CTE, or subquery source."""

    @classmethod
    def from_expression(
        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
    ) -> Step:
        """Build a Scan from a source expression.

        Subqueries become their own nested Step DAG, named after their alias,
        rather than a plain Scan.
        """
        table = expression
        alias_ = expression.alias_or_name

        if isinstance(expression, exp.Subquery):
            table = expression.this
            step = Step.from_expression(table, ctes)
            step.name = alias_
            return step

        step = Scan()
        step.name = alias_
        step.source = expression
        if ctes and table.name in ctes:
            # Reading from a CTE: depend on the step that materializes it.
            step.add_dependency(ctes[table.name])

        return step

    def __init__(self) -> None:
        super().__init__()
        # The expression to scan; None denotes a static (source-less) scan.
        self.source: t.Optional[exp.Expression] = None

    def _to_s(self, indent: str) -> t.List[str]:
        return [f"{indent}Source: {self.source.sql() if self.source else '-static-'}"]  # type: ignore
+301
+302
class Join(Step):
    """Step that joins its first dependency with one or more other sources."""

    @classmethod
    def from_joins(
        cls, joins: t.Iterable[exp.Join], ctes: t.Optional[t.Dict[str, Step]] = None
    ) -> Step:
        """Build a Join step with one Scan dependency per joined source."""
        step = Join()

        for join in joins:
            # Split the ON clause into equi-join keys and a residual condition.
            source_key, join_key, condition = join_condition(join)
            step.joins[join.this.alias_or_name] = {
                "side": join.side,
                "join_key": join_key,
                "source_key": source_key,
                "condition": condition,
            }

            step.add_dependency(Scan.from_expression(join.this, ctes))

        return step

    def __init__(self) -> None:
        super().__init__()
        # Maps each joined source's name to its join metadata
        # (side, join_key, source_key, condition).
        self.joins: t.Dict[str, t.Dict[str, t.List[str] | exp.Expression]] = {}

    def _to_s(self, indent: str) -> t.List[str]:
        lines = []
        for name, join in self.joins.items():
            lines.append(f"{indent}{name}: {join['side']}")
            if join.get("condition"):
                lines.append(f"{indent}On: {join['condition'].sql()}")  # type: ignore
        return lines
+334
+335
class Aggregate(Step):
    """Step that groups rows and evaluates aggregate functions (also used for DISTINCT)."""

    def __init__(self) -> None:
        super().__init__()
        self.aggregations: t.List[exp.Expression] = []
        self.operands: t.Tuple[exp.Expression, ...] = ()
        self.group: t.Dict[str, exp.Expression] = {}
        self.source: t.Optional[str] = None

    def _to_s(self, indent: str) -> t.List[str]:
        def render(expressions):
            # One bullet line per expression, indented under its section header.
            return [f"{indent}  - {e.sql()}" for e in expressions]

        lines = [f"{indent}Aggregations:", *render(self.aggregations)]

        if self.group:
            lines.append(f"{indent}Group:")
            lines.extend(render(self.group.values()))
        if self.condition:
            lines.append(f"{indent}Having:")
            lines.extend(render([self.condition]))
        if self.operands:
            lines.append(f"{indent}Operands:")
            lines.extend(render(self.operands))

        return lines
+363
+364
class Sort(Step):
    """Step that orders its input by a sort key."""

    def __init__(self) -> None:
        super().__init__()
        # Populated with the ORDER BY expressions when the step is built.
        self.key = None

    def _to_s(self, indent: str) -> t.List[str]:
        rendered = [f"{indent}  - {e.sql()}" for e in self.key]  # type: ignore
        return [f"{indent}Key:", *rendered]
+377
+378
class SetOperation(Step):
    """Step representing a set operation (e.g. UNION) over two named inputs."""

    def __init__(
        self,
        op: t.Type[exp.Expression],
        left: str | None,
        right: str | None,
        distinct: bool = False,
    ) -> None:
        super().__init__()
        self.op = op
        self.left = left
        self.right = right
        self.distinct = distinct

    @classmethod
    def from_expression(
        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
    ) -> Step:
        """Build a SetOperation step (with both sides as dependencies) from an exp.Union."""
        assert isinstance(expression, exp.Union)

        left = Step.from_expression(expression.left, ctes)
        right = Step.from_expression(expression.right, ctes)

        step = cls(
            op=expression.__class__,
            left=left.name,
            right=right.name,
            distinct=bool(expression.args.get("distinct")),
        )
        step.add_dependency(left)
        step.add_dependency(right)
        return step

    def _to_s(self, indent: str) -> t.List[str]:
        if not self.distinct:
            return []
        return [f"{indent}Distinct: {self.distinct}"]

    @property
    def type_name(self) -> str:
        # Report the concrete operation (e.g. "Union") instead of this class's name.
        return self.op.__name__
+
+ + +
+
+ +
+ + class + Plan: + + + +
+ +
class Plan:
    """Holds a copy of a SQL expression together with the Step DAG built from it."""

    def __init__(self, expression: exp.Expression) -> None:
        # Copy so later mutations of the original expression don't affect the plan.
        self.expression = expression.copy()
        self.root = Step.from_expression(self.expression)
        self._dag: t.Dict[Step, t.Set[Step]] = {}

    @property
    def dag(self) -> t.Dict[Step, t.Set[Step]]:
        # Lazily flatten the step tree into an adjacency map and cache it.
        if not self._dag:
            dag: t.Dict[Step, t.Set[Step]] = {}
            nodes = {self.root}

            while nodes:
                node = nodes.pop()
                dag[node] = set()
                for dep in node.dependencies:
                    dag[node].add(dep)
                    nodes.add(dep)
            self._dag = dag

        return self._dag

    @property
    def leaves(self) -> t.Iterator[Step]:
        # Steps with no dependencies: the scans that can execute first.
        return (node for node, deps in self.dag.items() if not deps)

    def __repr__(self) -> str:
        return f"Plan\n----\n{repr(self.root)}"
+
+ + + + +
+ +
+ + Plan(expression: sqlglot.expressions.Expression) + + + +
+ +
    def __init__(self, expression: exp.Expression) -> None:
        # Copy so later mutations of the original expression don't affect the plan.
        self.expression = expression.copy()
        self.root = Step.from_expression(self.expression)
        self._dag: t.Dict[Step, t.Set[Step]] = {}
+
+ + + + +
+
+
+ +
+ + class + Step: + + + +
+ +
 43class Step:
+ 44    @classmethod
+ 45    def from_expression(
+ 46        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+ 47    ) -> Step:
+ 48        """
+ 49        Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.
+ 50        Note: the expression's tables and subqueries must be aliased for this method to work. For
+ 51        example, given the following expression:
+ 52
+ 53        SELECT
+ 54          x.a,
+ 55          SUM(x.b)
+ 56        FROM x AS x
+ 57        JOIN y AS y
+ 58          ON x.a = y.a
+ 59        GROUP BY x.a
+ 60
+ 61        the following DAG is produced (the expression IDs might differ per execution):
+ 62
+ 63        - Aggregate: x (4347984624)
+ 64            Context:
+ 65              Aggregations:
+ 66                - SUM(x.b)
+ 67              Group:
+ 68                - x.a
+ 69            Projections:
+ 70              - x.a
+ 71              - "x".""
+ 72            Dependencies:
+ 73            - Join: x (4347985296)
+ 74              Context:
+ 75                y:
+ 76                On: x.a = y.a
+ 77              Projections:
+ 78              Dependencies:
+ 79              - Scan: x (4347983136)
+ 80                Context:
+ 81                  Source: x AS x
+ 82                Projections:
+ 83              - Scan: y (4343416624)
+ 84                Context:
+ 85                  Source: y AS y
+ 86                Projections:
+ 87
+ 88        Args:
+ 89            expression: the expression to build the DAG from.
+ 90            ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
+ 91
+ 92        Returns:
+ 93            A Step DAG corresponding to `expression`.
+ 94        """
+ 95        ctes = ctes or {}
+ 96        with_ = expression.args.get("with")
+ 97
+ 98        # CTEs break the mold of scope and introduce themselves to all in the context.
+ 99        if with_:
+100            ctes = ctes.copy()
+101            for cte in with_.expressions:
+102                step = Step.from_expression(cte.this, ctes)
+103                step.name = cte.alias
+104                ctes[step.name] = step  # type: ignore
+105
+106        from_ = expression.args.get("from")
+107
+108        if isinstance(expression, exp.Select) and from_:
+109            from_ = from_.expressions
+110            if len(from_) > 1:
+111                raise UnsupportedError(
+112                    "Multi-from statements are unsupported. Run it through the optimizer"
+113                )
+114
+115            step = Scan.from_expression(from_[0], ctes)
+116        elif isinstance(expression, exp.Union):
+117            step = SetOperation.from_expression(expression, ctes)
+118        else:
+119            step = Scan()
+120
+121        joins = expression.args.get("joins")
+122
+123        if joins:
+124            join = Join.from_joins(joins, ctes)
+125            join.name = step.name
+126            join.add_dependency(step)
+127            step = join
+128
+129        projections = []  # final selects in this chain of steps representing a select
+130        operands = {}  # intermediate computations of agg funcs eg x + 1 in SUM(x + 1)
+131        aggregations = []
+132        sequence = itertools.count()
+133
+134        def extract_agg_operands(expression):
+135            for agg in expression.find_all(exp.AggFunc):
+136                for operand in agg.unnest_operands():
+137                    if isinstance(operand, exp.Column):
+138                        continue
+139                    if operand not in operands:
+140                        operands[operand] = f"_a_{next(sequence)}"
+141                    operand.replace(exp.column(operands[operand], quoted=True))
+142
+143        for e in expression.expressions:
+144            if e.find(exp.AggFunc):
+145                projections.append(exp.column(e.alias_or_name, step.name, quoted=True))
+146                aggregations.append(e)
+147                extract_agg_operands(e)
+148            else:
+149                projections.append(e)
+150
+151        where = expression.args.get("where")
+152
+153        if where:
+154            step.condition = where.this
+155
+156        group = expression.args.get("group")
+157
+158        if group or aggregations:
+159            aggregate = Aggregate()
+160            aggregate.source = step.name
+161            aggregate.name = step.name
+162
+163            having = expression.args.get("having")
+164
+165            if having:
+166                extract_agg_operands(having)
+167                aggregate.condition = having.this
+168
+169            aggregate.operands = tuple(
+170                alias(operand, alias_) for operand, alias_ in operands.items()
+171            )
+172            aggregate.aggregations = aggregations
+173            # give aggregates names and replace projections with references to them
+174            aggregate.group = {
+175                f"_g{i}": e for i, e in enumerate(group.expressions if group else [])
+176            }
+177            for projection in projections:
+178                for i, e in aggregate.group.items():
+179                    for child, _, _ in projection.walk():
+180                        if child == e:
+181                            child.replace(exp.column(i, step.name))
+182            aggregate.add_dependency(step)
+183            step = aggregate
+184
+185        order = expression.args.get("order")
+186
+187        if order:
+188            sort = Sort()
+189            sort.name = step.name
+190            sort.key = order.expressions
+191            sort.add_dependency(step)
+192            step = sort
+193
+194        step.projections = projections
+195
+196        if isinstance(expression, exp.Select) and expression.args.get("distinct"):
+197            distinct = Aggregate()
+198            distinct.source = step.name
+199            distinct.name = step.name
+200            distinct.group = {
+201                e.alias_or_name: exp.column(col=e.alias_or_name, table=step.name)
+202                for e in projections or expression.expressions
+203            }
+204            distinct.add_dependency(step)
+205            step = distinct
+206
+207        limit = expression.args.get("limit")
+208
+209        if limit:
+210            step.limit = int(limit.text("expression"))
+211
+212        return step
+213
+214    def __init__(self) -> None:
+215        self.name: t.Optional[str] = None
+216        self.dependencies: t.Set[Step] = set()
+217        self.dependents: t.Set[Step] = set()
+218        self.projections: t.Sequence[exp.Expression] = []
+219        self.limit: float = math.inf
+220        self.condition: t.Optional[exp.Expression] = None
+221
+222    def add_dependency(self, dependency: Step) -> None:
+223        self.dependencies.add(dependency)
+224        dependency.dependents.add(self)
+225
+226    def __repr__(self) -> str:
+227        return self.to_s()
+228
+229    def to_s(self, level: int = 0) -> str:
+230        indent = "  " * level
+231        nested = f"{indent}    "
+232
+233        context = self._to_s(f"{nested}  ")
+234
+235        if context:
+236            context = [f"{nested}Context:"] + context
+237
+238        lines = [
+239            f"{indent}- {self.id}",
+240            *context,
+241            f"{nested}Projections:",
+242        ]
+243
+244        for expression in self.projections:
+245            lines.append(f"{nested}  - {expression.sql()}")
+246
+247        if self.condition:
+248            lines.append(f"{nested}Condition: {self.condition.sql()}")
+249
+250        if self.limit is not math.inf:
+251            lines.append(f"{nested}Limit: {self.limit}")
+252
+253        if self.dependencies:
+254            lines.append(f"{nested}Dependencies:")
+255            for dependency in self.dependencies:
+256                lines.append("  " + dependency.to_s(level + 1))
+257
+258        return "\n".join(lines)
+259
+260    @property
+261    def type_name(self) -> str:
+262        return self.__class__.__name__
+263
+264    @property
+265    def id(self) -> str:
+266        name = self.name
+267        name = f" {name}" if name else ""
+268        return f"{self.type_name}:{name} ({id(self)})"
+269
+270    def _to_s(self, _indent: str) -> t.List[str]:
+271        return []
+
+ + + + +
+ +
+ + Step() + + + +
+ +
214    def __init__(self) -> None:
+215        self.name: t.Optional[str] = None
+216        self.dependencies: t.Set[Step] = set()
+217        self.dependents: t.Set[Step] = set()
+218        self.projections: t.Sequence[exp.Expression] = []
+219        self.limit: float = math.inf
+220        self.condition: t.Optional[exp.Expression] = None
+
+ + + + +
+
+ +
+
@classmethod
+ + def + from_expression( cls, expression: sqlglot.expressions.Expression, ctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step: + + + +
+ +
 44    @classmethod
+ 45    def from_expression(
+ 46        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+ 47    ) -> Step:
+ 48        """
+ 49        Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine.
+ 50        Note: the expression's tables and subqueries must be aliased for this method to work. For
+ 51        example, given the following expression:
+ 52
+ 53        SELECT
+ 54          x.a,
+ 55          SUM(x.b)
+ 56        FROM x AS x
+ 57        JOIN y AS y
+ 58          ON x.a = y.a
+ 59        GROUP BY x.a
+ 60
+ 61        the following DAG is produced (the expression IDs might differ per execution):
+ 62
+ 63        - Aggregate: x (4347984624)
+ 64            Context:
+ 65              Aggregations:
+ 66                - SUM(x.b)
+ 67              Group:
+ 68                - x.a
+ 69            Projections:
+ 70              - x.a
+ 71              - "x".""
+ 72            Dependencies:
+ 73            - Join: x (4347985296)
+ 74              Context:
+ 75                y:
+ 76                On: x.a = y.a
+ 77              Projections:
+ 78              Dependencies:
+ 79              - Scan: x (4347983136)
+ 80                Context:
+ 81                  Source: x AS x
+ 82                Projections:
+ 83              - Scan: y (4343416624)
+ 84                Context:
+ 85                  Source: y AS y
+ 86                Projections:
+ 87
+ 88        Args:
+ 89            expression: the expression to build the DAG from.
+ 90            ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
+ 91
+ 92        Returns:
+ 93            A Step DAG corresponding to `expression`.
+ 94        """
+ 95        ctes = ctes or {}
+ 96        with_ = expression.args.get("with")
+ 97
+ 98        # CTEs break the mold of scope and introduce themselves to all in the context.
+ 99        if with_:
+100            ctes = ctes.copy()
+101            for cte in with_.expressions:
+102                step = Step.from_expression(cte.this, ctes)
+103                step.name = cte.alias
+104                ctes[step.name] = step  # type: ignore
+105
+106        from_ = expression.args.get("from")
+107
+108        if isinstance(expression, exp.Select) and from_:
+109            from_ = from_.expressions
+110            if len(from_) > 1:
+111                raise UnsupportedError(
+112                    "Multi-from statements are unsupported. Run it through the optimizer"
+113                )
+114
+115            step = Scan.from_expression(from_[0], ctes)
+116        elif isinstance(expression, exp.Union):
+117            step = SetOperation.from_expression(expression, ctes)
+118        else:
+119            step = Scan()
+120
+121        joins = expression.args.get("joins")
+122
+123        if joins:
+124            join = Join.from_joins(joins, ctes)
+125            join.name = step.name
+126            join.add_dependency(step)
+127            step = join
+128
+129        projections = []  # final selects in this chain of steps representing a select
+130        operands = {}  # intermediate computations of agg funcs eg x + 1 in SUM(x + 1)
+131        aggregations = []
+132        sequence = itertools.count()
+133
+134        def extract_agg_operands(expression):
+135            for agg in expression.find_all(exp.AggFunc):
+136                for operand in agg.unnest_operands():
+137                    if isinstance(operand, exp.Column):
+138                        continue
+139                    if operand not in operands:
+140                        operands[operand] = f"_a_{next(sequence)}"
+141                    operand.replace(exp.column(operands[operand], quoted=True))
+142
+143        for e in expression.expressions:
+144            if e.find(exp.AggFunc):
+145                projections.append(exp.column(e.alias_or_name, step.name, quoted=True))
+146                aggregations.append(e)
+147                extract_agg_operands(e)
+148            else:
+149                projections.append(e)
+150
+151        where = expression.args.get("where")
+152
+153        if where:
+154            step.condition = where.this
+155
+156        group = expression.args.get("group")
+157
+158        if group or aggregations:
+159            aggregate = Aggregate()
+160            aggregate.source = step.name
+161            aggregate.name = step.name
+162
+163            having = expression.args.get("having")
+164
+165            if having:
+166                extract_agg_operands(having)
+167                aggregate.condition = having.this
+168
+169            aggregate.operands = tuple(
+170                alias(operand, alias_) for operand, alias_ in operands.items()
+171            )
+172            aggregate.aggregations = aggregations
+173            # give aggregates names and replace projections with references to them
+174            aggregate.group = {
+175                f"_g{i}": e for i, e in enumerate(group.expressions if group else [])
+176            }
+177            for projection in projections:
+178                for i, e in aggregate.group.items():
+179                    for child, _, _ in projection.walk():
+180                        if child == e:
+181                            child.replace(exp.column(i, step.name))
+182            aggregate.add_dependency(step)
+183            step = aggregate
+184
+185        order = expression.args.get("order")
+186
+187        if order:
+188            sort = Sort()
+189            sort.name = step.name
+190            sort.key = order.expressions
+191            sort.add_dependency(step)
+192            step = sort
+193
+194        step.projections = projections
+195
+196        if isinstance(expression, exp.Select) and expression.args.get("distinct"):
+197            distinct = Aggregate()
+198            distinct.source = step.name
+199            distinct.name = step.name
+200            distinct.group = {
+201                e.alias_or_name: exp.column(col=e.alias_or_name, table=step.name)
+202                for e in projections or expression.expressions
+203            }
+204            distinct.add_dependency(step)
+205            step = distinct
+206
+207        limit = expression.args.get("limit")
+208
+209        if limit:
+210            step.limit = int(limit.text("expression"))
+211
+212        return step
+
+ + +

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine. +Note: the expression's tables and subqueries must be aliased for this method to work. For +example, given the following expression:

+ +

SELECT + x.a, + SUM(x.b) +FROM x AS x +JOIN y AS y + ON x.a = y.a +GROUP BY x.a

+ +

the following DAG is produced (the expression IDs might differ per execution):

+ +
    +
  • Aggregate: x (4347984624) +Context: + Aggregations: + - SUM(x.b) + Group: + - x.a +Projections: +
      +
    • x.a
    • +
    • "x"."" +Dependencies: +
        +
      • Join: x (4347985296) +Context: +y: +On: x.a = y.a +Projections: +Dependencies:
      • +
    • +
    • Scan: x (4347983136) +Context: + Source: x AS x +Projections:
    • +
    • Scan: y (4343416624) +Context: + Source: y AS y +Projections:
    • +
  • +
+ +
Arguments:
+ +
    +
  • expression: the expression to build the DAG from.
  • +
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • +
+ +
Returns:
+ +
+

A Step DAG corresponding to expression.

+
+
+ + +
+
+ +
+ + def + add_dependency(self, dependency: sqlglot.planner.Step) -> None: + + + +
+ +
222    def add_dependency(self, dependency: Step) -> None:
+223        self.dependencies.add(dependency)
+224        dependency.dependents.add(self)
+
+ + + + +
+
+ +
+ + def + to_s(self, level: int = 0) -> str: + + + +
+ +
229    def to_s(self, level: int = 0) -> str:
+230        indent = "  " * level
+231        nested = f"{indent}    "
+232
+233        context = self._to_s(f"{nested}  ")
+234
+235        if context:
+236            context = [f"{nested}Context:"] + context
+237
+238        lines = [
+239            f"{indent}- {self.id}",
+240            *context,
+241            f"{nested}Projections:",
+242        ]
+243
+244        for expression in self.projections:
+245            lines.append(f"{nested}  - {expression.sql()}")
+246
+247        if self.condition:
+248            lines.append(f"{nested}Condition: {self.condition.sql()}")
+249
+250        if self.limit is not math.inf:
+251            lines.append(f"{nested}Limit: {self.limit}")
+252
+253        if self.dependencies:
+254            lines.append(f"{nested}Dependencies:")
+255            for dependency in self.dependencies:
+256                lines.append("  " + dependency.to_s(level + 1))
+257
+258        return "\n".join(lines)
+
+ + + + +
+
+
+ +
+ + class + Scan(Step): + + + +
+ +
274class Scan(Step):
+275    @classmethod
+276    def from_expression(
+277        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+278    ) -> Step:
+279        table = expression
+280        alias_ = expression.alias_or_name
+281
+282        if isinstance(expression, exp.Subquery):
+283            table = expression.this
+284            step = Step.from_expression(table, ctes)
+285            step.name = alias_
+286            return step
+287
+288        step = Scan()
+289        step.name = alias_
+290        step.source = expression
+291        if ctes and table.name in ctes:
+292            step.add_dependency(ctes[table.name])
+293
+294        return step
+295
+296    def __init__(self) -> None:
+297        super().__init__()
+298        self.source: t.Optional[exp.Expression] = None
+299
+300    def _to_s(self, indent: str) -> t.List[str]:
+301        return [f"{indent}Source: {self.source.sql() if self.source else '-static-'}"]  # type: ignore
+
+ + + + +
+ +
+ + Scan() + + + +
+ +
296    def __init__(self) -> None:
+297        super().__init__()
+298        self.source: t.Optional[exp.Expression] = None
+
+ + + + +
+
+ +
+
@classmethod
+ + def + from_expression( cls, expression: sqlglot.expressions.Expression, ctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step: + + + +
+ +
275    @classmethod
+276    def from_expression(
+277        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+278    ) -> Step:
+279        table = expression
+280        alias_ = expression.alias_or_name
+281
+282        if isinstance(expression, exp.Subquery):
+283            table = expression.this
+284            step = Step.from_expression(table, ctes)
+285            step.name = alias_
+286            return step
+287
+288        step = Scan()
+289        step.name = alias_
+290        step.source = expression
+291        if ctes and table.name in ctes:
+292            step.add_dependency(ctes[table.name])
+293
+294        return step
+
+ + +

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine. +Note: the expression's tables and subqueries must be aliased for this method to work. For +example, given the following expression:

+ +

SELECT + x.a, + SUM(x.b) +FROM x AS x +JOIN y AS y + ON x.a = y.a +GROUP BY x.a

+ +

the following DAG is produced (the expression IDs might differ per execution):

+ +
    +
  • Aggregate: x (4347984624) +Context: + Aggregations: + - SUM(x.b) + Group: + - x.a +Projections: +
      +
    • x.a
    • +
    • "x"."" +Dependencies: +
        +
      • Join: x (4347985296) +Context: +y: +On: x.a = y.a +Projections: +Dependencies:
      • +
    • +
    • Scan: x (4347983136) +Context: + Source: x AS x +Projections:
    • +
    • Scan: y (4343416624) +Context: + Source: y AS y +Projections:
    • +
  • +
+ +
Arguments:
+ +
    +
  • expression: the expression to build the DAG from.
  • +
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • +
+ +
Returns:
+ +
+

A Step DAG corresponding to expression.

+
+
+ + +
+
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Join(Step): + + + +
+ +
304class Join(Step):
+305    @classmethod
+306    def from_joins(
+307        cls, joins: t.Iterable[exp.Join], ctes: t.Optional[t.Dict[str, Step]] = None
+308    ) -> Step:
+309        step = Join()
+310
+311        for join in joins:
+312            source_key, join_key, condition = join_condition(join)
+313            step.joins[join.this.alias_or_name] = {
+314                "side": join.side,
+315                "join_key": join_key,
+316                "source_key": source_key,
+317                "condition": condition,
+318            }
+319
+320            step.add_dependency(Scan.from_expression(join.this, ctes))
+321
+322        return step
+323
+324    def __init__(self) -> None:
+325        super().__init__()
+326        self.joins: t.Dict[str, t.Dict[str, t.List[str] | exp.Expression]] = {}
+327
+328    def _to_s(self, indent: str) -> t.List[str]:
+329        lines = []
+330        for name, join in self.joins.items():
+331            lines.append(f"{indent}{name}: {join['side']}")
+332            if join.get("condition"):
+333                lines.append(f"{indent}On: {join['condition'].sql()}")  # type: ignore
+334        return lines
+
+ + + + +
+ +
+ + Join() + + + +
+ +
324    def __init__(self) -> None:
+325        super().__init__()
+326        self.joins: t.Dict[str, t.Dict[str, t.List[str] | exp.Expression]] = {}
+
+ + + + +
+
+ +
+
@classmethod
+ + def + from_joins( cls, joins: Iterable[sqlglot.expressions.Join], ctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step: + + + +
+ +
305    @classmethod
+306    def from_joins(
+307        cls, joins: t.Iterable[exp.Join], ctes: t.Optional[t.Dict[str, Step]] = None
+308    ) -> Step:
+309        step = Join()
+310
+311        for join in joins:
+312            source_key, join_key, condition = join_condition(join)
+313            step.joins[join.this.alias_or_name] = {
+314                "side": join.side,
+315                "join_key": join_key,
+316                "source_key": source_key,
+317                "condition": condition,
+318            }
+319
+320            step.add_dependency(Scan.from_expression(join.this, ctes))
+321
+322        return step
+
+ + + + +
+
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Aggregate(Step): + + + +
+ +
337class Aggregate(Step):
+338    def __init__(self) -> None:
+339        super().__init__()
+340        self.aggregations: t.List[exp.Expression] = []
+341        self.operands: t.Tuple[exp.Expression, ...] = ()
+342        self.group: t.Dict[str, exp.Expression] = {}
+343        self.source: t.Optional[str] = None
+344
+345    def _to_s(self, indent: str) -> t.List[str]:
+346        lines = [f"{indent}Aggregations:"]
+347
+348        for expression in self.aggregations:
+349            lines.append(f"{indent}  - {expression.sql()}")
+350
+351        if self.group:
+352            lines.append(f"{indent}Group:")
+353            for expression in self.group.values():
+354                lines.append(f"{indent}  - {expression.sql()}")
+355        if self.condition:
+356            lines.append(f"{indent}Having:")
+357            lines.append(f"{indent}  - {self.condition.sql()}")
+358        if self.operands:
+359            lines.append(f"{indent}Operands:")
+360            for expression in self.operands:
+361                lines.append(f"{indent}  - {expression.sql()}")
+362
+363        return lines
+
+ + + + +
+ +
+ + Aggregate() + + + +
+ +
338    def __init__(self) -> None:
+339        super().__init__()
+340        self.aggregations: t.List[exp.Expression] = []
+341        self.operands: t.Tuple[exp.Expression, ...] = ()
+342        self.group: t.Dict[str, exp.Expression] = {}
+343        self.source: t.Optional[str] = None
+
+ + + + +
+
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + Sort(Step): + + + +
+ +
366class Sort(Step):
+367    def __init__(self) -> None:
+368        super().__init__()
+369        self.key = None
+370
+371    def _to_s(self, indent: str) -> t.List[str]:
+372        lines = [f"{indent}Key:"]
+373
+374        for expression in self.key:  # type: ignore
+375            lines.append(f"{indent}  - {expression.sql()}")
+376
+377        return lines
+
+ + + + +
+ +
+ + Sort() + + + +
+ +
367    def __init__(self) -> None:
+368        super().__init__()
+369        self.key = None
+
+ + + + +
+
+
Inherited Members
+
+ +
+
+
+
+ +
+ + class + SetOperation(Step): + + + +
+ +
380class SetOperation(Step):
+381    def __init__(
+382        self,
+383        op: t.Type[exp.Expression],
+384        left: str | None,
+385        right: str | None,
+386        distinct: bool = False,
+387    ) -> None:
+388        super().__init__()
+389        self.op = op
+390        self.left = left
+391        self.right = right
+392        self.distinct = distinct
+393
+394    @classmethod
+395    def from_expression(
+396        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+397    ) -> Step:
+398        assert isinstance(expression, exp.Union)
+399        left = Step.from_expression(expression.left, ctes)
+400        right = Step.from_expression(expression.right, ctes)
+401        step = cls(
+402            op=expression.__class__,
+403            left=left.name,
+404            right=right.name,
+405            distinct=bool(expression.args.get("distinct")),
+406        )
+407        step.add_dependency(left)
+408        step.add_dependency(right)
+409        return step
+410
+411    def _to_s(self, indent: str) -> t.List[str]:
+412        lines = []
+413        if self.distinct:
+414            lines.append(f"{indent}Distinct: {self.distinct}")
+415        return lines
+416
+417    @property
+418    def type_name(self) -> str:
+419        return self.op.__name__
+
+ + + + +
+ +
+ + SetOperation( op: Type[sqlglot.expressions.Expression], left: str | None, right: str | None, distinct: bool = False) + + + +
+ +
381    def __init__(
+382        self,
+383        op: t.Type[exp.Expression],
+384        left: str | None,
+385        right: str | None,
+386        distinct: bool = False,
+387    ) -> None:
+388        super().__init__()
+389        self.op = op
+390        self.left = left
+391        self.right = right
+392        self.distinct = distinct
+
+ + + + +
+
+ +
+
@classmethod
+ + def + from_expression( cls, expression: sqlglot.expressions.Expression, ctes: Optional[Dict[str, sqlglot.planner.Step]] = None) -> sqlglot.planner.Step: + + + +
+ +
394    @classmethod
+395    def from_expression(
+396        cls, expression: exp.Expression, ctes: t.Optional[t.Dict[str, Step]] = None
+397    ) -> Step:
+398        assert isinstance(expression, exp.Union)
+399        left = Step.from_expression(expression.left, ctes)
+400        right = Step.from_expression(expression.right, ctes)
+401        step = cls(
+402            op=expression.__class__,
+403            left=left.name,
+404            right=right.name,
+405            distinct=bool(expression.args.get("distinct")),
+406        )
+407        step.add_dependency(left)
+408        step.add_dependency(right)
+409        return step
+
+ + +

Builds a DAG of Steps from a SQL expression so that it's easier to execute in an engine. +Note: the expression's tables and subqueries must be aliased for this method to work. For +example, given the following expression:

+ +

SELECT + x.a, + SUM(x.b) +FROM x AS x +JOIN y AS y + ON x.a = y.a +GROUP BY x.a

+ +

the following DAG is produced (the expression IDs might differ per execution):

+ +
    +
  • Aggregate: x (4347984624) +Context: + Aggregations: + - SUM(x.b) + Group: + - x.a +Projections: +
      +
    • x.a
    • +
    • "x"."" +Dependencies: +
        +
      • Join: x (4347985296) +Context: +y: +On: x.a = y.a +Projections: +Dependencies:
      • +
    • +
    • Scan: x (4347983136) +Context: + Source: x AS x +Projections:
    • +
    • Scan: y (4343416624) +Context: + Source: y AS y +Projections:
    • +
  • +
+ +
Arguments:
+ +
    +
  • expression: the expression to build the DAG from.
  • +
  • ctes: a dictionary that maps CTEs to their corresponding Step DAG by name.
  • +
+ +
Returns:
+ +
+

A Step DAG corresponding to expression.

+
+
+ + +
+
+
Inherited Members
+
+ +
+
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/schema.html b/docs/sqlglot/schema.html new file mode 100644 index 0000000..9edba6c --- /dev/null +++ b/docs/sqlglot/schema.html @@ -0,0 +1,1624 @@ + + + + + + + sqlglot.schema API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.schema

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import abc
+  4import typing as t
+  5
+  6import sqlglot
+  7from sqlglot import expressions as exp
+  8from sqlglot.errors import SchemaError
+  9from sqlglot.helper import dict_depth
+ 10from sqlglot.trie import in_trie, new_trie
+ 11
+ 12if t.TYPE_CHECKING:
+ 13    from sqlglot.dataframe.sql.types import StructType
+ 14    from sqlglot.dialects.dialect import DialectType
+ 15
+ 16    ColumnMapping = t.Union[t.Dict, str, StructType, t.List]
+ 17
+ 18TABLE_ARGS = ("this", "db", "catalog")
+ 19
+ 20T = t.TypeVar("T")
+ 21
+ 22
+ 23class Schema(abc.ABC):
+ 24    """Abstract base class for database schemas"""
+ 25
+ 26    @abc.abstractmethod
+ 27    def add_table(
+ 28        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
+ 29    ) -> None:
+ 30        """
+ 31        Register or update a table. Some implementing classes may require column information to also be provided.
+ 32
+ 33        Args:
+ 34            table: table expression instance or string representing the table.
+ 35            column_mapping: a column mapping that describes the structure of the table.
+ 36        """
+ 37
+ 38    @abc.abstractmethod
+ 39    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
+ 40        """
+ 41        Get the column names for a table.
+ 42
+ 43        Args:
+ 44            table: the `Table` expression instance.
+ 45            only_visible: whether to include invisible columns.
+ 46
+ 47        Returns:
+ 48            The list of column names.
+ 49        """
+ 50
+ 51    @abc.abstractmethod
+ 52    def get_column_type(self, table: exp.Table | str, column: exp.Column) -> exp.DataType:
+ 53        """
+ 54        Get the :class:`sqlglot.exp.DataType` type of a column in the schema.
+ 55
+ 56        Args:
+ 57            table: the source table.
+ 58            column: the target column.
+ 59
+ 60        Returns:
+ 61            The resulting column type.
+ 62        """
+ 63
+ 64    @property
+ 65    def supported_table_args(self) -> t.Tuple[str, ...]:
+ 66        """
+ 67        Table arguments this schema support, e.g. `("this", "db", "catalog")`
+ 68        """
+ 69        raise NotImplementedError
+ 70
+ 71
+ 72class AbstractMappingSchema(t.Generic[T]):
+ 73    def __init__(
+ 74        self,
+ 75        mapping: dict | None = None,
+ 76    ) -> None:
+ 77        self.mapping = mapping or {}
+ 78        self.mapping_trie = self._build_trie(self.mapping)
+ 79        self._supported_table_args: t.Tuple[str, ...] = tuple()
+ 80
+ 81    def _build_trie(self, schema: t.Dict) -> t.Dict:
+ 82        return new_trie(tuple(reversed(t)) for t in flatten_schema(schema, depth=self._depth()))
+ 83
+ 84    def _depth(self) -> int:
+ 85        return dict_depth(self.mapping)
+ 86
+ 87    @property
+ 88    def supported_table_args(self) -> t.Tuple[str, ...]:
+ 89        if not self._supported_table_args and self.mapping:
+ 90            depth = self._depth()
+ 91
+ 92            if not depth:  # None
+ 93                self._supported_table_args = tuple()
+ 94            elif 1 <= depth <= 3:
+ 95                self._supported_table_args = TABLE_ARGS[:depth]
+ 96            else:
+ 97                raise SchemaError(f"Invalid mapping shape. Depth: {depth}")
+ 98
+ 99        return self._supported_table_args
+100
+101    def table_parts(self, table: exp.Table) -> t.List[str]:
+102        if isinstance(table.this, exp.ReadCSV):
+103            return [table.this.name]
+104        return [table.text(part) for part in TABLE_ARGS if table.text(part)]
+105
+106    def find(
+107        self, table: exp.Table, trie: t.Optional[t.Dict] = None, raise_on_missing: bool = True
+108    ) -> t.Optional[T]:
+109        parts = self.table_parts(table)[0 : len(self.supported_table_args)]
+110        value, trie = in_trie(self.mapping_trie if trie is None else trie, parts)
+111
+112        if value == 0:
+113            return None
+114        elif value == 1:
+115            possibilities = flatten_schema(trie, depth=dict_depth(trie) - 1)
+116            if len(possibilities) == 1:
+117                parts.extend(possibilities[0])
+118            else:
+119                message = ", ".join(".".join(parts) for parts in possibilities)
+120                if raise_on_missing:
+121                    raise SchemaError(f"Ambiguous mapping for {table}: {message}.")
+122                return None
+123        return self._nested_get(parts, raise_on_missing=raise_on_missing)
+124
+125    def _nested_get(
+126        self, parts: t.Sequence[str], d: t.Optional[t.Dict] = None, raise_on_missing=True
+127    ) -> t.Optional[t.Any]:
+128        return _nested_get(
+129            d or self.mapping,
+130            *zip(self.supported_table_args, reversed(parts)),
+131            raise_on_missing=raise_on_missing,
+132        )
+133
+134
class MappingSchema(AbstractMappingSchema[t.Dict[str, str]], Schema):
    """
    Schema based on a nested mapping.

    Args:
        schema (dict): Mapping in one of the following forms:
            1. {table: {col: type}}
            2. {db: {table: {col: type}}}
            3. {catalog: {db: {table: {col: type}}}}
            4. None - Tables will be added later
        visible (dict): Optional mapping of which columns in the schema are visible. If not provided, all columns
            are assumed to be visible. The nesting should mirror that of the schema:
            1. {table: set(*cols)}
            2. {db: {table: set(*cols)}}
            3. {catalog: {db: {table: set(*cols)}}}
        dialect (str): The dialect to be used for custom type mappings.
    """

    def __init__(
        self,
        schema: t.Optional[t.Dict] = None,
        visible: t.Optional[t.Dict] = None,
        dialect: DialectType = None,
    ) -> None:
        self.dialect = dialect
        self.visible = visible or {}
        # Cache of raw type strings (e.g. "VARCHAR(10)") to parsed DataType expressions.
        self._type_mapping_cache: t.Dict[str, exp.DataType] = {}
        super().__init__(self._normalize(schema or {}))

    @classmethod
    def from_mapping_schema(cls, mapping_schema: MappingSchema) -> MappingSchema:
        """Build a new `MappingSchema` that shares `mapping_schema`'s contents."""
        return MappingSchema(
            schema=mapping_schema.mapping,
            visible=mapping_schema.visible,
            dialect=mapping_schema.dialect,
        )

    def copy(self, **kwargs) -> MappingSchema:
        """Return a shallow copy of this schema, with `kwargs` overriding constructor args."""
        return MappingSchema(
            **{  # type: ignore
                "schema": self.mapping.copy(),
                "visible": self.visible.copy(),
                "dialect": self.dialect,
                **kwargs,
            }
        )

    def _normalize(self, schema: t.Dict) -> t.Dict:
        """
        Converts all identifiers in the schema into lowercase, unless they're quoted.

        Args:
            schema: the schema to normalize.

        Returns:
            The normalized schema mapping.
        """
        flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1)

        normalized_mapping: t.Dict = {}
        for keys in flattened_schema:
            # zip(keys, keys) gives (name, key) pairs where the key doubles as
            # the error label, which is fine for raw schema dict lookups.
            columns = _nested_get(schema, *zip(keys, keys))
            assert columns is not None

            normalized_keys = [self._normalize_name(key) for key in keys]
            for column_name, column_type in columns.items():
                _nested_set(
                    normalized_mapping,
                    normalized_keys + [self._normalize_name(column_name)],
                    column_type,
                )

        return normalized_mapping

    def add_table(
        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
    ) -> None:
        """
        Register or update a table. Updates are only performed if a new column mapping is provided.

        Args:
            table: the `Table` expression instance or string representing the table.
            column_mapping: a column mapping that describes the structure of the table.
        """
        table_ = self._ensure_table(table)
        column_mapping = ensure_column_mapping(column_mapping)
        schema = self.find(table_, raise_on_missing=False)

        if schema and not column_mapping:
            return

        _nested_set(
            self.mapping,
            list(reversed(self.table_parts(table_))),
            column_mapping,
        )
        # The trie is derived from the mapping, so it must be rebuilt after a write.
        self.mapping_trie = self._build_trie(self.mapping)

    def _normalize_name(self, name: str) -> str:
        """Lowercase `name` unless it parses as a quoted identifier in `self.dialect`."""
        try:
            identifier: t.Optional[exp.Expression] = sqlglot.parse_one(
                name, read=self.dialect, into=exp.Identifier
            )
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # aren't swallowed. Names that don't parse as identifiers in this
            # dialect fall back to being wrapped as-is.
            identifier = exp.to_identifier(name)
        assert isinstance(identifier, exp.Identifier)

        if identifier.quoted:
            return identifier.name
        return identifier.name.lower()

    def _depth(self) -> int:
        # The columns themselves are a mapping, but we don't want to include those
        return super()._depth() - 1

    def _ensure_table(self, table: exp.Table | str) -> exp.Table:
        """Coerce `table` to an `exp.Table`, raising `SchemaError` if that fails."""
        table_ = exp.to_table(table)

        if not table_:
            raise SchemaError(f"Not a valid table '{table}'")

        return table_

    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
        """
        Get the column names for a table.

        Args:
            table: the `Table` expression instance or string representing the table.
            only_visible: whether to exclude invisible columns.

        Returns:
            The list of column names (empty if the table isn't in the schema).
        """
        table_ = self._ensure_table(table)
        schema = self.find(table_)

        if schema is None:
            return []

        if not only_visible or not self.visible:
            return list(schema)

        visible = self._nested_get(self.table_parts(table_), self.visible)
        return [col for col in schema if col in visible]  # type: ignore

    def get_column_type(self, table: exp.Table | str, column: exp.Column | str) -> exp.DataType:
        """
        Get the :class:`sqlglot.exp.DataType` type of a column in the schema.

        Args:
            table: the source table.
            column: the target column, as an expression or name.

        Returns:
            The resulting column type, or UNKNOWN if the table isn't in the schema.

        Raises:
            SchemaError: if the column type can't be interpreted or the table is invalid.
        """
        column_name = column if isinstance(column, str) else column.name
        table_ = exp.to_table(table)
        if table_:
            table_schema = self.find(table_, raise_on_missing=False)
            if table_schema:
                column_type = table_schema.get(column_name)

                if isinstance(column_type, exp.DataType):
                    return column_type
                elif isinstance(column_type, str):
                    return self._to_data_type(column_type.upper())
                raise SchemaError(f"Unknown column type '{column_type}'")
            return exp.DataType(this=exp.DataType.Type.UNKNOWN)
        raise SchemaError(f"Could not convert table '{table}'")

    def _to_data_type(self, schema_type: str) -> exp.DataType:
        """
        Convert a type represented as a string to the corresponding :class:`sqlglot.exp.DataType` object.

        Args:
            schema_type: the type we want to convert.

        Returns:
            The resulting expression type.

        Raises:
            SchemaError: if the type string can't be converted for this dialect.
        """
        if schema_type not in self._type_mapping_cache:
            try:
                expression = exp.maybe_parse(schema_type, into=exp.DataType, dialect=self.dialect)
                if expression is None:
                    raise ValueError(f"Could not parse {schema_type}")
                self._type_mapping_cache[schema_type] = expression  # type: ignore
            except AttributeError:
                raise SchemaError(f"Failed to convert type {schema_type}")

        return self._type_mapping_cache[schema_type]
+307
+308
def ensure_schema(schema: t.Any) -> Schema:
    """Coerce `schema` into a `Schema`, wrapping raw mappings in a `MappingSchema`."""
    return schema if isinstance(schema, Schema) else MappingSchema(schema)
+314
+315
def ensure_column_mapping(mapping: t.Optional[ColumnMapping]):
    """Normalize any supported column-mapping form into a {column: type} dict.

    Accepts a dict (returned unchanged), a "name: type, ..." string, a
    DataFrame StructType (detected via its `simpleString` method), a list of
    column names (types default to None), or None (empty mapping).
    """
    if isinstance(mapping, dict):
        return mapping
    if isinstance(mapping, str):
        entries = (chunk.strip() for chunk in mapping.split(","))
        return {
            entry.split(":")[0].strip(): entry.split(":")[1].strip() for entry in entries
        }
    # Duck-typed check: StructType exposes `simpleString`, so avoid a hard
    # dependency on the DataFrame types module.
    if hasattr(mapping, "simpleString"):
        return {field.name: field.dataType.simpleString() for field in mapping}  # type: ignore
    if isinstance(mapping, list):
        return {column.strip(): None for column in mapping}
    if mapping is None:
        return {}
    raise ValueError(f"Invalid mapping provided: {type(mapping)}")
+333
+334
def flatten_schema(
    schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None
) -> t.List[t.List[str]]:
    """Flatten a nested schema into a list of key paths of length `depth`.

    For example, {"db": {"tbl": {...}}} with depth=2 yields [["db", "tbl"]].
    """
    prefix = keys or []
    paths: t.List[t.List[str]] = []

    for name, value in schema.items():
        if depth == 1:
            paths.append(prefix + [name])
        elif depth >= 2:
            # Descend one level, carrying the accumulated key path.
            paths.extend(flatten_schema(value, depth - 1, prefix + [name]))
    return paths
+347
+348
def _nested_get(
    d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True
) -> t.Optional[t.Any]:
    """
    Get a value from a nested dictionary.

    Args:
        d: the dictionary to search.
        *path: tuples of (name, key), where:
            `key` is the key in the dictionary to get.
            `name` is a string to use in the error if `key` isn't found.
        raise_on_missing: whether a missing key raises instead of returning None.

    Returns:
        The value, or None if it doesn't exist and `raise_on_missing` is False.

    Raises:
        ValueError: if a key along the path is missing and `raise_on_missing` is True.
    """
    node = d
    for name, key in path:
        node = node.get(key)  # type: ignore
        if node is None:
            if not raise_on_missing:
                return None
            # "this" is the internal arg name for the table part; report it as "table".
            label = "table" if name == "this" else name
            raise ValueError(f"Unknown {label}: {key}")
    return node
+372
+373
def _nested_set(d: t.Dict, keys: t.List[str], value: t.Any) -> t.Dict:
    """
    In-place set a value in a nested dictionary.

    Example:
        >>> _nested_set({}, ["top_key", "second_key"], "value")
        {'top_key': {'second_key': 'value'}}

        >>> _nested_set({"top_key": {"third_key": "third_value"}}, ["top_key", "second_key"], "value")
        {'top_key': {'third_key': 'third_value', 'second_key': 'value'}}

    Args:
        d: dictionary to update.
        keys: the keys that make up the path to `value`.
        value: the value to set in the dictionary for the given key path.

    Returns:
        The (possibly) updated dictionary.
    """
    if not keys:
        return d

    *parents, last = keys
    node = d
    for key in parents:
        # setdefault both creates a missing intermediate dict and descends into it.
        node = node.setdefault(key, {})

    node[last] = value
    return d
+
+ + +
+
+ +
+ + class + Schema(abc.ABC): + + + +
+ +
class Schema(abc.ABC):
    """Abstract base class for database schemas"""

    @abc.abstractmethod
    def add_table(
        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
    ) -> None:
        """
        Register or update a table. Some implementing classes may require column information to also be provided.

        Args:
            table: table expression instance or string representing the table.
            column_mapping: a column mapping that describes the structure of the table.
        """

    @abc.abstractmethod
    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
        """
        Get the column names for a table.

        Args:
            table: the `Table` expression instance.
            only_visible: whether to include invisible columns.

        Returns:
            The list of column names.
        """

    @abc.abstractmethod
    def get_column_type(self, table: exp.Table | str, column: exp.Column) -> exp.DataType:
        """
        Get the :class:`sqlglot.exp.DataType` type of a column in the schema.

        Args:
            table: the source table.
            column: the target column.

        Returns:
            The resulting column type.
        """

    @property
    def supported_table_args(self) -> t.Tuple[str, ...]:
        """
        Table arguments this schema supports, e.g. `("this", "db", "catalog")`.

        Raises:
            NotImplementedError: subclasses that support table lookups must override this.
        """
        raise NotImplementedError
+
+ + +

Abstract base class for database schemas

+
+ + +
+ +
+
@abc.abstractmethod
+ + def + add_table( self, table: sqlglot.expressions.Table | str, column_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None) -> None: + + + +
+ +
27    @abc.abstractmethod
+28    def add_table(
+29        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
+30    ) -> None:
+31        """
+32        Register or update a table. Some implementing classes may require column information to also be provided.
+33
+34        Args:
+35            table: table expression instance or string representing the table.
+36            column_mapping: a column mapping that describes the structure of the table.
+37        """
+
+ + +

Register or update a table. Some implementing classes may require column information to also be provided.

+ +
Arguments:
+ +
    +
  • table: table expression instance or string representing the table.
  • +
  • column_mapping: a column mapping that describes the structure of the table.
  • +
+
+ + +
+
+ +
+
@abc.abstractmethod
+ + def + column_names( self, table: sqlglot.expressions.Table | str, only_visible: bool = False) -> List[str]: + + + +
+ +
39    @abc.abstractmethod
+40    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
+41        """
+42        Get the column names for a table.
+43
+44        Args:
+45            table: the `Table` expression instance.
+46            only_visible: whether to include invisible columns.
+47
+48        Returns:
+49            The list of column names.
+50        """
+
+ + +

Get the column names for a table.

+ +
Arguments:
+ +
    +
  • table: the Table expression instance.
  • +
  • only_visible: whether to include invisible columns.
  • +
+ +
Returns:
+ +
+

The list of column names.

+
+
+ + +
+
+ +
+
@abc.abstractmethod
+ + def + get_column_type( self, table: sqlglot.expressions.Table | str, column: sqlglot.expressions.Column) -> sqlglot.expressions.DataType: + + + +
+ +
52    @abc.abstractmethod
+53    def get_column_type(self, table: exp.Table | str, column: exp.Column) -> exp.DataType:
+54        """
+55        Get the :class:`sqlglot.exp.DataType` type of a column in the schema.
+56
+57        Args:
+58            table: the source table.
+59            column: the target column.
+60
+61        Returns:
+62            The resulting column type.
+63        """
+
+ + +

Get the sqlglot.exp.DataType type of a column in the schema.

+ +
Arguments:
+ +
    +
  • table: the source table.
  • +
  • column: the target column.
  • +
+ +
Returns:
+ +
+

The resulting column type.

+
+
+ + +
+
+
+ supported_table_args: Tuple[str, ...] + + +
+ + +

Table arguments this schema supports, e.g. ("this", "db", "catalog")

+
+ + +
+
+
+ +
+ + class + AbstractMappingSchema(typing.Generic[~T]): + + + +
+ +
 73class AbstractMappingSchema(t.Generic[T]):
+ 74    def __init__(
+ 75        self,
+ 76        mapping: dict | None = None,
+ 77    ) -> None:
+ 78        self.mapping = mapping or {}
+ 79        self.mapping_trie = self._build_trie(self.mapping)
+ 80        self._supported_table_args: t.Tuple[str, ...] = tuple()
+ 81
+ 82    def _build_trie(self, schema: t.Dict) -> t.Dict:
+ 83        return new_trie(tuple(reversed(t)) for t in flatten_schema(schema, depth=self._depth()))
+ 84
+ 85    def _depth(self) -> int:
+ 86        return dict_depth(self.mapping)
+ 87
+ 88    @property
+ 89    def supported_table_args(self) -> t.Tuple[str, ...]:
+ 90        if not self._supported_table_args and self.mapping:
+ 91            depth = self._depth()
+ 92
+ 93            if not depth:  # None
+ 94                self._supported_table_args = tuple()
+ 95            elif 1 <= depth <= 3:
+ 96                self._supported_table_args = TABLE_ARGS[:depth]
+ 97            else:
+ 98                raise SchemaError(f"Invalid mapping shape. Depth: {depth}")
+ 99
+100        return self._supported_table_args
+101
+102    def table_parts(self, table: exp.Table) -> t.List[str]:
+103        if isinstance(table.this, exp.ReadCSV):
+104            return [table.this.name]
+105        return [table.text(part) for part in TABLE_ARGS if table.text(part)]
+106
+107    def find(
+108        self, table: exp.Table, trie: t.Optional[t.Dict] = None, raise_on_missing: bool = True
+109    ) -> t.Optional[T]:
+110        parts = self.table_parts(table)[0 : len(self.supported_table_args)]
+111        value, trie = in_trie(self.mapping_trie if trie is None else trie, parts)
+112
+113        if value == 0:
+114            return None
+115        elif value == 1:
+116            possibilities = flatten_schema(trie, depth=dict_depth(trie) - 1)
+117            if len(possibilities) == 1:
+118                parts.extend(possibilities[0])
+119            else:
+120                message = ", ".join(".".join(parts) for parts in possibilities)
+121                if raise_on_missing:
+122                    raise SchemaError(f"Ambiguous mapping for {table}: {message}.")
+123                return None
+124        return self._nested_get(parts, raise_on_missing=raise_on_missing)
+125
+126    def _nested_get(
+127        self, parts: t.Sequence[str], d: t.Optional[t.Dict] = None, raise_on_missing=True
+128    ) -> t.Optional[t.Any]:
+129        return _nested_get(
+130            d or self.mapping,
+131            *zip(self.supported_table_args, reversed(parts)),
+132            raise_on_missing=raise_on_missing,
+133        )
+
+ + +

Abstract base class for generic types.

+ +

A generic type is typically declared by inheriting from +this class parameterized with one or more type variables. +For example, a generic mapping type might be defined as::

+ +

class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc.

+ +

This class can then be used as follows::

+ +

def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT: + try: + return mapping[key] + except KeyError: + return default

+
+ + +
+ +
+ + AbstractMappingSchema(mapping: dict | None = None) + + + +
+ +
74    def __init__(
+75        self,
+76        mapping: dict | None = None,
+77    ) -> None:
+78        self.mapping = mapping or {}
+79        self.mapping_trie = self._build_trie(self.mapping)
+80        self._supported_table_args: t.Tuple[str, ...] = tuple()
+
+ + + + +
+
+ +
+ + def + table_parts(self, table: sqlglot.expressions.Table) -> List[str]: + + + +
+ +
102    def table_parts(self, table: exp.Table) -> t.List[str]:
+103        if isinstance(table.this, exp.ReadCSV):
+104            return [table.this.name]
+105        return [table.text(part) for part in TABLE_ARGS if table.text(part)]
+
+ + + + +
+
+ +
+ + def + find( self, table: sqlglot.expressions.Table, trie: Optional[Dict] = None, raise_on_missing: bool = True) -> Optional[~T]: + + + +
+ +
107    def find(
+108        self, table: exp.Table, trie: t.Optional[t.Dict] = None, raise_on_missing: bool = True
+109    ) -> t.Optional[T]:
+110        parts = self.table_parts(table)[0 : len(self.supported_table_args)]
+111        value, trie = in_trie(self.mapping_trie if trie is None else trie, parts)
+112
+113        if value == 0:
+114            return None
+115        elif value == 1:
+116            possibilities = flatten_schema(trie, depth=dict_depth(trie) - 1)
+117            if len(possibilities) == 1:
+118                parts.extend(possibilities[0])
+119            else:
+120                message = ", ".join(".".join(parts) for parts in possibilities)
+121                if raise_on_missing:
+122                    raise SchemaError(f"Ambiguous mapping for {table}: {message}.")
+123                return None
+124        return self._nested_get(parts, raise_on_missing=raise_on_missing)
+
+ + + + +
+
+
+ +
+ + class + MappingSchema(sqlglot.schema.AbstractMappingSchema[typing.Dict[str, str]], Schema): + + + +
+ +
136class MappingSchema(AbstractMappingSchema[t.Dict[str, str]], Schema):
+137    """
+138    Schema based on a nested mapping.
+139
+140    Args:
+141        schema (dict): Mapping in one of the following forms:
+142            1. {table: {col: type}}
+143            2. {db: {table: {col: type}}}
+144            3. {catalog: {db: {table: {col: type}}}}
+145            4. None - Tables will be added later
+146        visible (dict): Optional mapping of which columns in the schema are visible. If not provided, all columns
+147            are assumed to be visible. The nesting should mirror that of the schema:
+148            1. {table: set(*cols)}}
+149            2. {db: {table: set(*cols)}}}
+150            3. {catalog: {db: {table: set(*cols)}}}}
+151        dialect (str): The dialect to be used for custom type mappings.
+152    """
+153
+154    def __init__(
+155        self,
+156        schema: t.Optional[t.Dict] = None,
+157        visible: t.Optional[t.Dict] = None,
+158        dialect: DialectType = None,
+159    ) -> None:
+160        self.dialect = dialect
+161        self.visible = visible or {}
+162        self._type_mapping_cache: t.Dict[str, exp.DataType] = {}
+163        super().__init__(self._normalize(schema or {}))
+164
+165    @classmethod
+166    def from_mapping_schema(cls, mapping_schema: MappingSchema) -> MappingSchema:
+167        return MappingSchema(
+168            schema=mapping_schema.mapping,
+169            visible=mapping_schema.visible,
+170            dialect=mapping_schema.dialect,
+171        )
+172
+173    def copy(self, **kwargs) -> MappingSchema:
+174        return MappingSchema(
+175            **{  # type: ignore
+176                "schema": self.mapping.copy(),
+177                "visible": self.visible.copy(),
+178                "dialect": self.dialect,
+179                **kwargs,
+180            }
+181        )
+182
+183    def _normalize(self, schema: t.Dict) -> t.Dict:
+184        """
+185        Converts all identifiers in the schema into lowercase, unless they're quoted.
+186
+187        Args:
+188            schema: the schema to normalize.
+189
+190        Returns:
+191            The normalized schema mapping.
+192        """
+193        flattened_schema = flatten_schema(schema, depth=dict_depth(schema) - 1)
+194
+195        normalized_mapping: t.Dict = {}
+196        for keys in flattened_schema:
+197            columns = _nested_get(schema, *zip(keys, keys))
+198            assert columns is not None
+199
+200            normalized_keys = [self._normalize_name(key) for key in keys]
+201            for column_name, column_type in columns.items():
+202                _nested_set(
+203                    normalized_mapping,
+204                    normalized_keys + [self._normalize_name(column_name)],
+205                    column_type,
+206                )
+207
+208        return normalized_mapping
+209
+210    def add_table(
+211        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
+212    ) -> None:
+213        """
+214        Register or update a table. Updates are only performed if a new column mapping is provided.
+215
+216        Args:
+217            table: the `Table` expression instance or string representing the table.
+218            column_mapping: a column mapping that describes the structure of the table.
+219        """
+220        table_ = self._ensure_table(table)
+221        column_mapping = ensure_column_mapping(column_mapping)
+222        schema = self.find(table_, raise_on_missing=False)
+223
+224        if schema and not column_mapping:
+225            return
+226
+227        _nested_set(
+228            self.mapping,
+229            list(reversed(self.table_parts(table_))),
+230            column_mapping,
+231        )
+232        self.mapping_trie = self._build_trie(self.mapping)
+233
+234    def _normalize_name(self, name: str) -> str:
+235        try:
+236            identifier: t.Optional[exp.Expression] = sqlglot.parse_one(
+237                name, read=self.dialect, into=exp.Identifier
+238            )
+239        except:
+240            identifier = exp.to_identifier(name)
+241        assert isinstance(identifier, exp.Identifier)
+242
+243        if identifier.quoted:
+244            return identifier.name
+245        return identifier.name.lower()
+246
+247    def _depth(self) -> int:
+248        # The columns themselves are a mapping, but we don't want to include those
+249        return super()._depth() - 1
+250
+251    def _ensure_table(self, table: exp.Table | str) -> exp.Table:
+252        table_ = exp.to_table(table)
+253
+254        if not table_:
+255            raise SchemaError(f"Not a valid table '{table}'")
+256
+257        return table_
+258
+259    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
+260        table_ = self._ensure_table(table)
+261        schema = self.find(table_)
+262
+263        if schema is None:
+264            return []
+265
+266        if not only_visible or not self.visible:
+267            return list(schema)
+268
+269        visible = self._nested_get(self.table_parts(table_), self.visible)
+270        return [col for col in schema if col in visible]  # type: ignore
+271
+272    def get_column_type(self, table: exp.Table | str, column: exp.Column | str) -> exp.DataType:
+273        column_name = column if isinstance(column, str) else column.name
+274        table_ = exp.to_table(table)
+275        if table_:
+276            table_schema = self.find(table_, raise_on_missing=False)
+277            if table_schema:
+278                column_type = table_schema.get(column_name)
+279
+280                if isinstance(column_type, exp.DataType):
+281                    return column_type
+282                elif isinstance(column_type, str):
+283                    return self._to_data_type(column_type.upper())
+284                raise SchemaError(f"Unknown column type '{column_type}'")
+285            return exp.DataType(this=exp.DataType.Type.UNKNOWN)
+286        raise SchemaError(f"Could not convert table '{table}'")
+287
+288    def _to_data_type(self, schema_type: str) -> exp.DataType:
+289        """
+290        Convert a type represented as a string to the corresponding :class:`sqlglot.exp.DataType` object.
+291
+292        Args:
+293            schema_type: the type we want to convert.
+294
+295        Returns:
+296            The resulting expression type.
+297        """
+298        if schema_type not in self._type_mapping_cache:
+299            try:
+300                expression = exp.maybe_parse(schema_type, into=exp.DataType, dialect=self.dialect)
+301                if expression is None:
+302                    raise ValueError(f"Could not parse {schema_type}")
+303                self._type_mapping_cache[schema_type] = expression  # type: ignore
+304            except AttributeError:
+305                raise SchemaError(f"Failed to convert type {schema_type}")
+306
+307        return self._type_mapping_cache[schema_type]
+
+ + +

Schema based on a nested mapping.

+ +
Arguments:
+ +
    +
  • schema (dict): Mapping in one of the following forms: +
      +
    1. {table: {col: type}}
    2. +
    3. {db: {table: {col: type}}}
    4. +
    5. {catalog: {db: {table: {col: type}}}}
    6. +
    7. None - Tables will be added later
    8. +
  • +
  • visible (dict): Optional mapping of which columns in the schema are visible. If not provided, all columns +are assumed to be visible. The nesting should mirror that of the schema: +
      +
    1. {table: set(*cols)}}
    2. +
    3. {db: {table: set(*cols)}}}
    4. +
    5. {catalog: {db: {table: set(*cols)}}}}
    6. +
  • +
  • dialect (str): The dialect to be used for custom type mappings.
  • +
+
+ + +
+ +
+ + MappingSchema( schema: Optional[Dict] = None, visible: Optional[Dict] = None, dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None) + + + +
+ +
154    def __init__(
+155        self,
+156        schema: t.Optional[t.Dict] = None,
+157        visible: t.Optional[t.Dict] = None,
+158        dialect: DialectType = None,
+159    ) -> None:
+160        self.dialect = dialect
+161        self.visible = visible or {}
+162        self._type_mapping_cache: t.Dict[str, exp.DataType] = {}
+163        super().__init__(self._normalize(schema or {}))
+
+ + + + +
+
+ +
+
@classmethod
+ + def + from_mapping_schema( cls, mapping_schema: sqlglot.schema.MappingSchema) -> sqlglot.schema.MappingSchema: + + + +
+ +
165    @classmethod
+166    def from_mapping_schema(cls, mapping_schema: MappingSchema) -> MappingSchema:
+167        return MappingSchema(
+168            schema=mapping_schema.mapping,
+169            visible=mapping_schema.visible,
+170            dialect=mapping_schema.dialect,
+171        )
+
+ + + + +
+
+ +
+ + def + copy(self, **kwargs) -> sqlglot.schema.MappingSchema: + + + +
+ +
173    def copy(self, **kwargs) -> MappingSchema:
+174        return MappingSchema(
+175            **{  # type: ignore
+176                "schema": self.mapping.copy(),
+177                "visible": self.visible.copy(),
+178                "dialect": self.dialect,
+179                **kwargs,
+180            }
+181        )
+
+ + + + +
+
+ +
+ + def + add_table( self, table: sqlglot.expressions.Table | str, column_mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType] = None) -> None: + + + +
+ +
210    def add_table(
+211        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
+212    ) -> None:
+213        """
+214        Register or update a table. Updates are only performed if a new column mapping is provided.
+215
+216        Args:
+217            table: the `Table` expression instance or string representing the table.
+218            column_mapping: a column mapping that describes the structure of the table.
+219        """
+220        table_ = self._ensure_table(table)
+221        column_mapping = ensure_column_mapping(column_mapping)
+222        schema = self.find(table_, raise_on_missing=False)
+223
+224        if schema and not column_mapping:
+225            return
+226
+227        _nested_set(
+228            self.mapping,
+229            list(reversed(self.table_parts(table_))),
+230            column_mapping,
+231        )
+232        self.mapping_trie = self._build_trie(self.mapping)
+
+ + +

Register or update a table. Updates are only performed if a new column mapping is provided.

+ +
Arguments:
+ +
    +
  • table: the Table expression instance or string representing the table.
  • +
  • column_mapping: a column mapping that describes the structure of the table.
  • +
+
+ + +
+
+ +
+ + def + column_names( self, table: sqlglot.expressions.Table | str, only_visible: bool = False) -> List[str]: + + + +
+ +
259    def column_names(self, table: exp.Table | str, only_visible: bool = False) -> t.List[str]:
+260        table_ = self._ensure_table(table)
+261        schema = self.find(table_)
+262
+263        if schema is None:
+264            return []
+265
+266        if not only_visible or not self.visible:
+267            return list(schema)
+268
+269        visible = self._nested_get(self.table_parts(table_), self.visible)
+270        return [col for col in schema if col in visible]  # type: ignore
+
+ + +

Get the column names for a table.

+ +
Arguments:
+ +
    +
  • table: the Table expression instance.
  • +
  • only_visible: whether to include invisible columns.
  • +
+ +
Returns:
+ +
+

The list of column names.

+
+
+ + +
+
+ +
+ + def + get_column_type( self, table: sqlglot.expressions.Table | str, column: sqlglot.expressions.Column | str) -> sqlglot.expressions.DataType: + + + +
+ +
272    def get_column_type(self, table: exp.Table | str, column: exp.Column | str) -> exp.DataType:
+273        column_name = column if isinstance(column, str) else column.name
+274        table_ = exp.to_table(table)
+275        if table_:
+276            table_schema = self.find(table_, raise_on_missing=False)
+277            if table_schema:
+278                column_type = table_schema.get(column_name)
+279
+280                if isinstance(column_type, exp.DataType):
+281                    return column_type
+282                elif isinstance(column_type, str):
+283                    return self._to_data_type(column_type.upper())
+284                raise SchemaError(f"Unknown column type '{column_type}'")
+285            return exp.DataType(this=exp.DataType.Type.UNKNOWN)
+286        raise SchemaError(f"Could not convert table '{table}'")
+
+ + +

Get the sqlglot.exp.DataType type of a column in the schema.

+ +
Arguments:
+ +
    +
  • table: the source table.
  • +
  • column: the target column.
  • +
+ +
Returns:
+ +
+

The resulting column type.

+
+
+ + +
+
+
Inherited Members
+
+ +
+
+
+
+ +
+ + def + ensure_schema(schema: Any) -> sqlglot.schema.Schema: + + + +
+ +
310def ensure_schema(schema: t.Any) -> Schema:
+311    if isinstance(schema, Schema):
+312        return schema
+313
+314    return MappingSchema(schema)
+
+ + + + +
+
+ +
+ + def + ensure_column_mapping( mapping: Union[Dict, str, sqlglot.dataframe.sql.types.StructType, List, NoneType]): + + + +
+ +
317def ensure_column_mapping(mapping: t.Optional[ColumnMapping]):
+318    if isinstance(mapping, dict):
+319        return mapping
+320    elif isinstance(mapping, str):
+321        col_name_type_strs = [x.strip() for x in mapping.split(",")]
+322        return {
+323            name_type_str.split(":")[0].strip(): name_type_str.split(":")[1].strip()
+324            for name_type_str in col_name_type_strs
+325        }
+326    # Check if mapping looks like a DataFrame StructType
+327    elif hasattr(mapping, "simpleString"):
+328        return {struct_field.name: struct_field.dataType.simpleString() for struct_field in mapping}  # type: ignore
+329    elif isinstance(mapping, list):
+330        return {x.strip(): None for x in mapping}
+331    elif mapping is None:
+332        return {}
+333    raise ValueError(f"Invalid mapping provided: {type(mapping)}")
+
+ + + + +
+
+ +
+ + def + flatten_schema( schema: Dict, depth: int, keys: Optional[List[str]] = None) -> List[List[str]]: + + + +
+ +
336def flatten_schema(
+337    schema: t.Dict, depth: int, keys: t.Optional[t.List[str]] = None
+338) -> t.List[t.List[str]]:
+339    tables = []
+340    keys = keys or []
+341
+342    for k, v in schema.items():
+343        if depth >= 2:
+344            tables.extend(flatten_schema(v, depth - 1, keys + [k]))
+345        elif depth == 1:
+346            tables.append(keys + [k])
+347    return tables
+
+ + + + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/serde.html b/docs/sqlglot/serde.html new file mode 100644 index 0000000..84be9d8 --- /dev/null +++ b/docs/sqlglot/serde.html @@ -0,0 +1,408 @@ + + + + + + + sqlglot.serde API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.serde

+ + + + + + +
 1from __future__ import annotations
+ 2
+ 3import typing as t
+ 4
+ 5from sqlglot import expressions as exp
+ 6
+ 7if t.TYPE_CHECKING:
+ 8    JSON = t.Union[dict, list, str, float, int, bool]
+ 9    Node = t.Union[t.List["Node"], exp.DataType.Type, exp.Expression, JSON]
+10
+11
+12def dump(node: Node) -> JSON:
+13    """
+14    Recursively dump an AST into a JSON-serializable dict.
+15    """
+16    if isinstance(node, list):
+17        return [dump(i) for i in node]
+18    if isinstance(node, exp.DataType.Type):
+19        return {
+20            "class": "DataType.Type",
+21            "value": node.value,
+22        }
+23    if isinstance(node, exp.Expression):
+24        klass = node.__class__.__qualname__
+25        if node.__class__.__module__ != exp.__name__:
+26            klass = f"{node.__module__}.{klass}"
+27        obj = {
+28            "class": klass,
+29            "args": {k: dump(v) for k, v in node.args.items() if v is not None and v != []},
+30        }
+31        if node.type:
+32            obj["type"] = node.type.sql()
+33        if node.comments:
+34            obj["comments"] = node.comments
+35        return obj
+36    return node
+37
+38
+39def load(obj: JSON) -> Node:
+40    """
+41    Recursively load a dict (as returned by `dump`) into an AST.
+42    """
+43    if isinstance(obj, list):
+44        return [load(i) for i in obj]
+45    if isinstance(obj, dict):
+46        class_name = obj["class"]
+47
+48        if class_name == "DataType.Type":
+49            return exp.DataType.Type(obj["value"])
+50
+51        if "." in class_name:
+52            module_path, class_name = class_name.rsplit(".", maxsplit=1)
+53            module = __import__(module_path, fromlist=[class_name])
+54        else:
+55            module = exp
+56
+57        klass = getattr(module, class_name)
+58
+59        expression = klass(**{k: load(v) for k, v in obj["args"].items()})
+60        type_ = obj.get("type")
+61        if type_:
+62            expression.type = exp.DataType.build(type_)
+63        comments = obj.get("comments")
+64        if comments:
+65            expression.comments = load(comments)
+66        return expression
+67    return obj
+
+ + +
+
+ +
+ + def + dump( node: Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]) -> Union[dict, list, str, float, int, bool]: + + + +
+ +
13def dump(node: Node) -> JSON:
+14    """
+15    Recursively dump an AST into a JSON-serializable dict.
+16    """
+17    if isinstance(node, list):
+18        return [dump(i) for i in node]
+19    if isinstance(node, exp.DataType.Type):
+20        return {
+21            "class": "DataType.Type",
+22            "value": node.value,
+23        }
+24    if isinstance(node, exp.Expression):
+25        klass = node.__class__.__qualname__
+26        if node.__class__.__module__ != exp.__name__:
+27            klass = f"{node.__module__}.{klass}"
+28        obj = {
+29            "class": klass,
+30            "args": {k: dump(v) for k, v in node.args.items() if v is not None and v != []},
+31        }
+32        if node.type:
+33            obj["type"] = node.type.sql()
+34        if node.comments:
+35            obj["comments"] = node.comments
+36        return obj
+37    return node
+
+ + +

Recursively dump an AST into a JSON-serializable dict.

+
+ + +
+
+ +
+ + def + load( obj: Union[dict, list, str, float, int, bool]) -> Union[List[ForwardRef('Node')], sqlglot.expressions.DataType.Type, sqlglot.expressions.Expression, dict, list, str, float, int, bool]: + + + +
+ +
40def load(obj: JSON) -> Node:
+41    """
+42    Recursively load a dict (as returned by `dump`) into an AST.
+43    """
+44    if isinstance(obj, list):
+45        return [load(i) for i in obj]
+46    if isinstance(obj, dict):
+47        class_name = obj["class"]
+48
+49        if class_name == "DataType.Type":
+50            return exp.DataType.Type(obj["value"])
+51
+52        if "." in class_name:
+53            module_path, class_name = class_name.rsplit(".", maxsplit=1)
+54            module = __import__(module_path, fromlist=[class_name])
+55        else:
+56            module = exp
+57
+58        klass = getattr(module, class_name)
+59
+60        expression = klass(**{k: load(v) for k, v in obj["args"].items()})
+61        type_ = obj.get("type")
+62        if type_:
+63            expression.type = exp.DataType.build(type_)
+64        comments = obj.get("comments")
+65        if comments:
+66            expression.comments = load(comments)
+67        return expression
+68    return obj
+
+ + +

Recursively load a dict (as returned by dump) into an AST.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/time.html b/docs/sqlglot/time.html new file mode 100644 index 0000000..8d07b25 --- /dev/null +++ b/docs/sqlglot/time.html @@ -0,0 +1,385 @@ + + + + + + + sqlglot.time API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.time

+ + + + + + +
 1import typing as t
+ 2
+ 3# The generic time format is based on python time.strftime.
+ 4# https://docs.python.org/3/library/time.html#time.strftime
+ 5from sqlglot.trie import in_trie, new_trie
+ 6
+ 7
+ 8def format_time(
+ 9    string: str, mapping: t.Dict[str, str], trie: t.Optional[t.Dict] = None
+10) -> t.Optional[str]:
+11    """
+12    Converts a time string given a mapping.
+13
+14    Examples:
+15        >>> format_time("%Y", {"%Y": "YYYY"})
+16        'YYYY'
+17
+18        Args:
+19            mapping: dictionary of time format to target time format.
+20            trie: optional trie, can be passed in for performance.
+21
+22        Returns:
+23            The converted time string.
+24    """
+25    if not string:
+26        return None
+27
+28    start = 0
+29    end = 1
+30    size = len(string)
+31    trie = trie or new_trie(mapping)
+32    current = trie
+33    chunks = []
+34    sym = None
+35
+36    while end <= size:
+37        chars = string[start:end]
+38        result, current = in_trie(current, chars[-1])
+39
+40        if result == 0:
+41            if sym:
+42                end -= 1
+43                chars = sym
+44                sym = None
+45            start += len(chars)
+46            chunks.append(chars)
+47            current = trie
+48        elif result == 2:
+49            sym = chars
+50
+51        end += 1
+52
+53        if result and end > size:
+54            chunks.append(chars)
+55    return "".join(mapping.get(chars, chars) for chars in chunks)
+
+ + +
+
+ +
+ + def + format_time( string: str, mapping: Dict[str, str], trie: Optional[Dict] = None) -> Optional[str]: + + + +
+ +
 9def format_time(
+10    string: str, mapping: t.Dict[str, str], trie: t.Optional[t.Dict] = None
+11) -> t.Optional[str]:
+12    """
+13    Converts a time string given a mapping.
+14
+15    Examples:
+16        >>> format_time("%Y", {"%Y": "YYYY"})
+17        'YYYY'
+18
+19        Args:
+20            mapping: dictionary of time format to target time format.
+21            trie: optional trie, can be passed in for performance.
+22
+23        Returns:
+24            The converted time string.
+25    """
+26    if not string:
+27        return None
+28
+29    start = 0
+30    end = 1
+31    size = len(string)
+32    trie = trie or new_trie(mapping)
+33    current = trie
+34    chunks = []
+35    sym = None
+36
+37    while end <= size:
+38        chars = string[start:end]
+39        result, current = in_trie(current, chars[-1])
+40
+41        if result == 0:
+42            if sym:
+43                end -= 1
+44                chars = sym
+45                sym = None
+46            start += len(chars)
+47            chunks.append(chars)
+48            current = trie
+49        elif result == 2:
+50            sym = chars
+51
+52        end += 1
+53
+54        if result and end > size:
+55            chunks.append(chars)
+56    return "".join(mapping.get(chars, chars) for chars in chunks)
+
+ + +

Converts a time string given a mapping.

+ +
Examples:
+ +
+
+
>>> format_time("%Y", {"%Y": "YYYY"})
+'YYYY'
+
+
+ +

Args: + mapping: dictionary of time format to target time format. + trie: optional trie, can be passed in for performance.

+ +

Returns: + The converted time string.

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/tokens.html b/docs/sqlglot/tokens.html new file mode 100644 index 0000000..09ccc62 --- /dev/null +++ b/docs/sqlglot/tokens.html @@ -0,0 +1,6712 @@ + + + + + + + sqlglot.tokens API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.tokens

+ + + + + + +
   1from __future__ import annotations
+   2
+   3import typing as t
+   4from enum import auto
+   5
+   6from sqlglot.helper import AutoName
+   7from sqlglot.trie import in_trie, new_trie
+   8
+   9
+  10class TokenType(AutoName):
+  11    L_PAREN = auto()
+  12    R_PAREN = auto()
+  13    L_BRACKET = auto()
+  14    R_BRACKET = auto()
+  15    L_BRACE = auto()
+  16    R_BRACE = auto()
+  17    COMMA = auto()
+  18    DOT = auto()
+  19    DASH = auto()
+  20    PLUS = auto()
+  21    COLON = auto()
+  22    DCOLON = auto()
+  23    SEMICOLON = auto()
+  24    STAR = auto()
+  25    BACKSLASH = auto()
+  26    SLASH = auto()
+  27    LT = auto()
+  28    LTE = auto()
+  29    GT = auto()
+  30    GTE = auto()
+  31    NOT = auto()
+  32    EQ = auto()
+  33    NEQ = auto()
+  34    NULLSAFE_EQ = auto()
+  35    AND = auto()
+  36    OR = auto()
+  37    AMP = auto()
+  38    DPIPE = auto()
+  39    PIPE = auto()
+  40    CARET = auto()
+  41    TILDA = auto()
+  42    ARROW = auto()
+  43    DARROW = auto()
+  44    FARROW = auto()
+  45    HASH = auto()
+  46    HASH_ARROW = auto()
+  47    DHASH_ARROW = auto()
+  48    LR_ARROW = auto()
+  49    DOLLAR = auto()
+  50    PARAMETER = auto()
+  51    SESSION_PARAMETER = auto()
+  52    NATIONAL = auto()
+  53
+  54    BLOCK_START = auto()
+  55    BLOCK_END = auto()
+  56
+  57    SPACE = auto()
+  58    BREAK = auto()
+  59
+  60    STRING = auto()
+  61    NUMBER = auto()
+  62    IDENTIFIER = auto()
+  63    COLUMN = auto()
+  64    COLUMN_DEF = auto()
+  65    SCHEMA = auto()
+  66    TABLE = auto()
+  67    VAR = auto()
+  68    BIT_STRING = auto()
+  69    HEX_STRING = auto()
+  70    BYTE_STRING = auto()
+  71
+  72    # types
+  73    BOOLEAN = auto()
+  74    TINYINT = auto()
+  75    SMALLINT = auto()
+  76    INT = auto()
+  77    BIGINT = auto()
+  78    FLOAT = auto()
+  79    DOUBLE = auto()
+  80    DECIMAL = auto()
+  81    CHAR = auto()
+  82    NCHAR = auto()
+  83    VARCHAR = auto()
+  84    NVARCHAR = auto()
+  85    TEXT = auto()
+  86    MEDIUMTEXT = auto()
+  87    LONGTEXT = auto()
+  88    MEDIUMBLOB = auto()
+  89    LONGBLOB = auto()
+  90    BINARY = auto()
+  91    VARBINARY = auto()
+  92    JSON = auto()
+  93    JSONB = auto()
+  94    TIME = auto()
+  95    TIMESTAMP = auto()
+  96    TIMESTAMPTZ = auto()
+  97    TIMESTAMPLTZ = auto()
+  98    DATETIME = auto()
+  99    DATE = auto()
+ 100    UUID = auto()
+ 101    GEOGRAPHY = auto()
+ 102    NULLABLE = auto()
+ 103    GEOMETRY = auto()
+ 104    HLLSKETCH = auto()
+ 105    HSTORE = auto()
+ 106    SUPER = auto()
+ 107    SERIAL = auto()
+ 108    SMALLSERIAL = auto()
+ 109    BIGSERIAL = auto()
+ 110    XML = auto()
+ 111    UNIQUEIDENTIFIER = auto()
+ 112    MONEY = auto()
+ 113    SMALLMONEY = auto()
+ 114    ROWVERSION = auto()
+ 115    IMAGE = auto()
+ 116    VARIANT = auto()
+ 117    OBJECT = auto()
+ 118
+ 119    # keywords
+ 120    ALIAS = auto()
+ 121    ALTER = auto()
+ 122    ALWAYS = auto()
+ 123    ALL = auto()
+ 124    ANTI = auto()
+ 125    ANY = auto()
+ 126    APPLY = auto()
+ 127    ARRAY = auto()
+ 128    ASC = auto()
+ 129    ASOF = auto()
+ 130    AT_TIME_ZONE = auto()
+ 131    AUTO_INCREMENT = auto()
+ 132    BEGIN = auto()
+ 133    BETWEEN = auto()
+ 134    BOTH = auto()
+ 135    BUCKET = auto()
+ 136    BY_DEFAULT = auto()
+ 137    CACHE = auto()
+ 138    CASCADE = auto()
+ 139    CASE = auto()
+ 140    CHARACTER_SET = auto()
+ 141    CHECK = auto()
+ 142    CLUSTER_BY = auto()
+ 143    COLLATE = auto()
+ 144    COMMAND = auto()
+ 145    COMMENT = auto()
+ 146    COMMIT = auto()
+ 147    COMPOUND = auto()
+ 148    CONSTRAINT = auto()
+ 149    CREATE = auto()
+ 150    CROSS = auto()
+ 151    CUBE = auto()
+ 152    CURRENT_DATE = auto()
+ 153    CURRENT_DATETIME = auto()
+ 154    CURRENT_ROW = auto()
+ 155    CURRENT_TIME = auto()
+ 156    CURRENT_TIMESTAMP = auto()
+ 157    DEFAULT = auto()
+ 158    DELETE = auto()
+ 159    DESC = auto()
+ 160    DESCRIBE = auto()
+ 161    DISTINCT = auto()
+ 162    DISTINCT_FROM = auto()
+ 163    DISTRIBUTE_BY = auto()
+ 164    DIV = auto()
+ 165    DROP = auto()
+ 166    ELSE = auto()
+ 167    ENCODE = auto()
+ 168    END = auto()
+ 169    ESCAPE = auto()
+ 170    EXCEPT = auto()
+ 171    EXECUTE = auto()
+ 172    EXISTS = auto()
+ 173    FALSE = auto()
+ 174    FETCH = auto()
+ 175    FILTER = auto()
+ 176    FINAL = auto()
+ 177    FIRST = auto()
+ 178    FOLLOWING = auto()
+ 179    FOR = auto()
+ 180    FOREIGN_KEY = auto()
+ 181    FORMAT = auto()
+ 182    FROM = auto()
+ 183    FULL = auto()
+ 184    FUNCTION = auto()
+ 185    GENERATED = auto()
+ 186    GLOB = auto()
+ 187    GLOBAL = auto()
+ 188    GROUP_BY = auto()
+ 189    GROUPING_SETS = auto()
+ 190    HAVING = auto()
+ 191    HINT = auto()
+ 192    IDENTITY = auto()
+ 193    IF = auto()
+ 194    IGNORE_NULLS = auto()
+ 195    ILIKE = auto()
+ 196    IN = auto()
+ 197    INDEX = auto()
+ 198    INNER = auto()
+ 199    INSERT = auto()
+ 200    INTERSECT = auto()
+ 201    INTERVAL = auto()
+ 202    INTO = auto()
+ 203    INTRODUCER = auto()
+ 204    IRLIKE = auto()
+ 205    IS = auto()
+ 206    ISNULL = auto()
+ 207    JOIN = auto()
+ 208    LANGUAGE = auto()
+ 209    LATERAL = auto()
+ 210    LAZY = auto()
+ 211    LEADING = auto()
+ 212    LEFT = auto()
+ 213    LIKE = auto()
+ 214    LIMIT = auto()
+ 215    LOAD_DATA = auto()
+ 216    LOCAL = auto()
+ 217    MAP = auto()
+ 218    MATCH_RECOGNIZE = auto()
+ 219    MATERIALIZED = auto()
+ 220    MERGE = auto()
+ 221    MOD = auto()
+ 222    NATURAL = auto()
+ 223    NEXT = auto()
+ 224    NO_ACTION = auto()
+ 225    NOTNULL = auto()
+ 226    NULL = auto()
+ 227    NULLS_FIRST = auto()
+ 228    NULLS_LAST = auto()
+ 229    OFFSET = auto()
+ 230    ON = auto()
+ 231    ONLY = auto()
+ 232    OPTIONS = auto()
+ 233    ORDER_BY = auto()
+ 234    ORDERED = auto()
+ 235    ORDINALITY = auto()
+ 236    OUTER = auto()
+ 237    OUT_OF = auto()
+ 238    OVER = auto()
+ 239    OVERWRITE = auto()
+ 240    PARTITION = auto()
+ 241    PARTITION_BY = auto()
+ 242    PERCENT = auto()
+ 243    PIVOT = auto()
+ 244    PLACEHOLDER = auto()
+ 245    PRECEDING = auto()
+ 246    PRIMARY_KEY = auto()
+ 247    PROCEDURE = auto()
+ 248    PROPERTIES = auto()
+ 249    PSEUDO_TYPE = auto()
+ 250    QUALIFY = auto()
+ 251    QUOTE = auto()
+ 252    RANGE = auto()
+ 253    RECURSIVE = auto()
+ 254    REPLACE = auto()
+ 255    RESPECT_NULLS = auto()
+ 256    REFERENCES = auto()
+ 257    RIGHT = auto()
+ 258    RLIKE = auto()
+ 259    ROLLBACK = auto()
+ 260    ROLLUP = auto()
+ 261    ROW = auto()
+ 262    ROWS = auto()
+ 263    SCHEMA_COMMENT = auto()
+ 264    SEED = auto()
+ 265    SELECT = auto()
+ 266    SEMI = auto()
+ 267    SEPARATOR = auto()
+ 268    SERDE_PROPERTIES = auto()
+ 269    SET = auto()
+ 270    SHOW = auto()
+ 271    SIMILAR_TO = auto()
+ 272    SOME = auto()
+ 273    SORTKEY = auto()
+ 274    SORT_BY = auto()
+ 275    STRUCT = auto()
+ 276    TABLE_SAMPLE = auto()
+ 277    TEMPORARY = auto()
+ 278    TOP = auto()
+ 279    THEN = auto()
+ 280    TRAILING = auto()
+ 281    TRUE = auto()
+ 282    UNBOUNDED = auto()
+ 283    UNCACHE = auto()
+ 284    UNION = auto()
+ 285    UNLOGGED = auto()
+ 286    UNNEST = auto()
+ 287    UNPIVOT = auto()
+ 288    UPDATE = auto()
+ 289    USE = auto()
+ 290    USING = auto()
+ 291    VALUES = auto()
+ 292    VIEW = auto()
+ 293    VOLATILE = auto()
+ 294    WHEN = auto()
+ 295    WHERE = auto()
+ 296    WINDOW = auto()
+ 297    WITH = auto()
+ 298    WITH_TIME_ZONE = auto()
+ 299    WITH_LOCAL_TIME_ZONE = auto()
+ 300    WITHIN_GROUP = auto()
+ 301    WITHOUT_TIME_ZONE = auto()
+ 302    UNIQUE = auto()
+ 303
+ 304
+ 305class Token:
+ 306    __slots__ = ("token_type", "text", "line", "col", "comments")
+ 307
+ 308    @classmethod
+ 309    def number(cls, number: int) -> Token:
+ 310        """Returns a NUMBER token with `number` as its text."""
+ 311        return cls(TokenType.NUMBER, str(number))
+ 312
+ 313    @classmethod
+ 314    def string(cls, string: str) -> Token:
+ 315        """Returns a STRING token with `string` as its text."""
+ 316        return cls(TokenType.STRING, string)
+ 317
+ 318    @classmethod
+ 319    def identifier(cls, identifier: str) -> Token:
+ 320        """Returns an IDENTIFIER token with `identifier` as its text."""
+ 321        return cls(TokenType.IDENTIFIER, identifier)
+ 322
+ 323    @classmethod
+ 324    def var(cls, var: str) -> Token:
+ 325        """Returns an VAR token with `var` as its text."""
+ 326        return cls(TokenType.VAR, var)
+ 327
+ 328    def __init__(
+ 329        self,
+ 330        token_type: TokenType,
+ 331        text: str,
+ 332        line: int = 1,
+ 333        col: int = 1,
+ 334        comments: t.List[str] = [],
+ 335    ) -> None:
+ 336        self.token_type = token_type
+ 337        self.text = text
+ 338        self.line = line
+ 339        self.col = max(col - len(text), 1)
+ 340        self.comments = comments
+ 341
+ 342    def __repr__(self) -> str:
+ 343        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
+ 344        return f"<Token {attributes}>"
+ 345
+ 346
+ 347class _Tokenizer(type):
+ 348    def __new__(cls, clsname, bases, attrs):  # type: ignore
+ 349        klass = super().__new__(cls, clsname, bases, attrs)
+ 350
+ 351        klass._QUOTES = {
+ 352            f"{prefix}{s}": e
+ 353            for s, e in cls._delimeter_list_to_dict(klass.QUOTES).items()
+ 354            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
+ 355        }
+ 356        klass._BIT_STRINGS = cls._delimeter_list_to_dict(klass.BIT_STRINGS)
+ 357        klass._HEX_STRINGS = cls._delimeter_list_to_dict(klass.HEX_STRINGS)
+ 358        klass._BYTE_STRINGS = cls._delimeter_list_to_dict(klass.BYTE_STRINGS)
+ 359        klass._IDENTIFIERS = cls._delimeter_list_to_dict(klass.IDENTIFIERS)
+ 360        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
+ 361        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
+ 362        klass._COMMENTS = dict(
+ 363            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
+ 364            for comment in klass.COMMENTS
+ 365        )
+ 366
+ 367        klass.KEYWORD_TRIE = new_trie(
+ 368            key.upper()
+ 369            for key in {
+ 370                **klass.KEYWORDS,
+ 371                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
+ 372                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
+ 373                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
+ 374                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
+ 375                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
+ 376            }
+ 377            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
+ 378        )
+ 379
+ 380        return klass
+ 381
+ 382    @staticmethod
+ 383    def _delimeter_list_to_dict(list: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
+ 384        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in list)
+ 385
+ 386
+ 387class Tokenizer(metaclass=_Tokenizer):
+ 388    SINGLE_TOKENS = {
+ 389        "(": TokenType.L_PAREN,
+ 390        ")": TokenType.R_PAREN,
+ 391        "[": TokenType.L_BRACKET,
+ 392        "]": TokenType.R_BRACKET,
+ 393        "{": TokenType.L_BRACE,
+ 394        "}": TokenType.R_BRACE,
+ 395        "&": TokenType.AMP,
+ 396        "^": TokenType.CARET,
+ 397        ":": TokenType.COLON,
+ 398        ",": TokenType.COMMA,
+ 399        ".": TokenType.DOT,
+ 400        "-": TokenType.DASH,
+ 401        "=": TokenType.EQ,
+ 402        ">": TokenType.GT,
+ 403        "<": TokenType.LT,
+ 404        "%": TokenType.MOD,
+ 405        "!": TokenType.NOT,
+ 406        "|": TokenType.PIPE,
+ 407        "+": TokenType.PLUS,
+ 408        ";": TokenType.SEMICOLON,
+ 409        "/": TokenType.SLASH,
+ 410        "\\": TokenType.BACKSLASH,
+ 411        "*": TokenType.STAR,
+ 412        "~": TokenType.TILDA,
+ 413        "?": TokenType.PLACEHOLDER,
+ 414        "@": TokenType.PARAMETER,
+ 415        # used for breaking a var like x'y' but nothing else
+ 416        # the token type doesn't matter
+ 417        "'": TokenType.QUOTE,
+ 418        "`": TokenType.IDENTIFIER,
+ 419        '"': TokenType.IDENTIFIER,
+ 420        "#": TokenType.HASH,
+ 421    }
+ 422
+ 423    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
+ 424
+ 425    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 426
+ 427    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 428
+ 429    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 430
+ 431    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
+ 432
+ 433    STRING_ESCAPES = ["'"]
+ 434
+ 435    _STRING_ESCAPES: t.Set[str] = set()
+ 436
+ 437    IDENTIFIER_ESCAPES = ['"']
+ 438
+ 439    _IDENTIFIER_ESCAPES: t.Set[str] = set()
+ 440
+ 441    KEYWORDS = {
+ 442        **{
+ 443            f"{key}{postfix}": TokenType.BLOCK_START
+ 444            for key in ("{{", "{%", "{#")
+ 445            for postfix in ("", "+", "-")
+ 446        },
+ 447        **{
+ 448            f"{prefix}{key}": TokenType.BLOCK_END
+ 449            for key in ("%}", "#}")
+ 450            for prefix in ("", "+", "-")
+ 451        },
+ 452        "+}}": TokenType.BLOCK_END,
+ 453        "-}}": TokenType.BLOCK_END,
+ 454        "/*+": TokenType.HINT,
+ 455        "==": TokenType.EQ,
+ 456        "::": TokenType.DCOLON,
+ 457        "||": TokenType.DPIPE,
+ 458        ">=": TokenType.GTE,
+ 459        "<=": TokenType.LTE,
+ 460        "<>": TokenType.NEQ,
+ 461        "!=": TokenType.NEQ,
+ 462        "<=>": TokenType.NULLSAFE_EQ,
+ 463        "->": TokenType.ARROW,
+ 464        "->>": TokenType.DARROW,
+ 465        "=>": TokenType.FARROW,
+ 466        "#>": TokenType.HASH_ARROW,
+ 467        "#>>": TokenType.DHASH_ARROW,
+ 468        "<->": TokenType.LR_ARROW,
+ 469        "ALL": TokenType.ALL,
+ 470        "ALWAYS": TokenType.ALWAYS,
+ 471        "AND": TokenType.AND,
+ 472        "ANTI": TokenType.ANTI,
+ 473        "ANY": TokenType.ANY,
+ 474        "ASC": TokenType.ASC,
+ 475        "AS": TokenType.ALIAS,
+ 476        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
+ 477        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
+ 478        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
+ 479        "BEGIN": TokenType.BEGIN,
+ 480        "BETWEEN": TokenType.BETWEEN,
+ 481        "BOTH": TokenType.BOTH,
+ 482        "BUCKET": TokenType.BUCKET,
+ 483        "BY DEFAULT": TokenType.BY_DEFAULT,
+ 484        "CACHE": TokenType.CACHE,
+ 485        "UNCACHE": TokenType.UNCACHE,
+ 486        "CASE": TokenType.CASE,
+ 487        "CASCADE": TokenType.CASCADE,
+ 488        "CHARACTER SET": TokenType.CHARACTER_SET,
+ 489        "CHECK": TokenType.CHECK,
+ 490        "CLUSTER BY": TokenType.CLUSTER_BY,
+ 491        "COLLATE": TokenType.COLLATE,
+ 492        "COLUMN": TokenType.COLUMN,
+ 493        "COMMENT": TokenType.SCHEMA_COMMENT,
+ 494        "COMMIT": TokenType.COMMIT,
+ 495        "COMPOUND": TokenType.COMPOUND,
+ 496        "CONSTRAINT": TokenType.CONSTRAINT,
+ 497        "CREATE": TokenType.CREATE,
+ 498        "CROSS": TokenType.CROSS,
+ 499        "CUBE": TokenType.CUBE,
+ 500        "CURRENT_DATE": TokenType.CURRENT_DATE,
+ 501        "CURRENT ROW": TokenType.CURRENT_ROW,
+ 502        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
+ 503        "DEFAULT": TokenType.DEFAULT,
+ 504        "DELETE": TokenType.DELETE,
+ 505        "DESC": TokenType.DESC,
+ 506        "DESCRIBE": TokenType.DESCRIBE,
+ 507        "DISTINCT": TokenType.DISTINCT,
+ 508        "DISTINCT FROM": TokenType.DISTINCT_FROM,
+ 509        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
+ 510        "DIV": TokenType.DIV,
+ 511        "DROP": TokenType.DROP,
+ 512        "ELSE": TokenType.ELSE,
+ 513        "END": TokenType.END,
+ 514        "ESCAPE": TokenType.ESCAPE,
+ 515        "EXCEPT": TokenType.EXCEPT,
+ 516        "EXECUTE": TokenType.EXECUTE,
+ 517        "EXISTS": TokenType.EXISTS,
+ 518        "FALSE": TokenType.FALSE,
+ 519        "FETCH": TokenType.FETCH,
+ 520        "FILTER": TokenType.FILTER,
+ 521        "FIRST": TokenType.FIRST,
+ 522        "FULL": TokenType.FULL,
+ 523        "FUNCTION": TokenType.FUNCTION,
+ 524        "FOLLOWING": TokenType.FOLLOWING,
+ 525        "FOR": TokenType.FOR,
+ 526        "FOREIGN KEY": TokenType.FOREIGN_KEY,
+ 527        "FORMAT": TokenType.FORMAT,
+ 528        "FROM": TokenType.FROM,
+ 529        "GENERATED": TokenType.GENERATED,
+ 530        "GLOB": TokenType.GLOB,
+ 531        "GROUP BY": TokenType.GROUP_BY,
+ 532        "GROUPING SETS": TokenType.GROUPING_SETS,
+ 533        "HAVING": TokenType.HAVING,
+ 534        "IDENTITY": TokenType.IDENTITY,
+ 535        "IF": TokenType.IF,
+ 536        "ILIKE": TokenType.ILIKE,
+ 537        "IGNORE NULLS": TokenType.IGNORE_NULLS,
+ 538        "IN": TokenType.IN,
+ 539        "INDEX": TokenType.INDEX,
+ 540        "INNER": TokenType.INNER,
+ 541        "INSERT": TokenType.INSERT,
+ 542        "INTERVAL": TokenType.INTERVAL,
+ 543        "INTERSECT": TokenType.INTERSECT,
+ 544        "INTO": TokenType.INTO,
+ 545        "IS": TokenType.IS,
+ 546        "ISNULL": TokenType.ISNULL,
+ 547        "JOIN": TokenType.JOIN,
+ 548        "LATERAL": TokenType.LATERAL,
+ 549        "LAZY": TokenType.LAZY,
+ 550        "LEADING": TokenType.LEADING,
+ 551        "LEFT": TokenType.LEFT,
+ 552        "LIKE": TokenType.LIKE,
+ 553        "LIMIT": TokenType.LIMIT,
+ 554        "LOAD DATA": TokenType.LOAD_DATA,
+ 555        "LOCAL": TokenType.LOCAL,
+ 556        "MATERIALIZED": TokenType.MATERIALIZED,
+ 557        "MERGE": TokenType.MERGE,
+ 558        "NATURAL": TokenType.NATURAL,
+ 559        "NEXT": TokenType.NEXT,
+ 560        "NO ACTION": TokenType.NO_ACTION,
+ 561        "NOT": TokenType.NOT,
+ 562        "NOTNULL": TokenType.NOTNULL,
+ 563        "NULL": TokenType.NULL,
+ 564        "NULLS FIRST": TokenType.NULLS_FIRST,
+ 565        "NULLS LAST": TokenType.NULLS_LAST,
+ 566        "OBJECT": TokenType.OBJECT,
+ 567        "OFFSET": TokenType.OFFSET,
+ 568        "ON": TokenType.ON,
+ 569        "ONLY": TokenType.ONLY,
+ 570        "OPTIONS": TokenType.OPTIONS,
+ 571        "OR": TokenType.OR,
+ 572        "ORDER BY": TokenType.ORDER_BY,
+ 573        "ORDINALITY": TokenType.ORDINALITY,
+ 574        "OUTER": TokenType.OUTER,
+ 575        "OUT OF": TokenType.OUT_OF,
+ 576        "OVER": TokenType.OVER,
+ 577        "OVERWRITE": TokenType.OVERWRITE,
+ 578        "PARTITION": TokenType.PARTITION,
+ 579        "PARTITION BY": TokenType.PARTITION_BY,
+ 580        "PARTITIONED BY": TokenType.PARTITION_BY,
+ 581        "PARTITIONED_BY": TokenType.PARTITION_BY,
+ 582        "PERCENT": TokenType.PERCENT,
+ 583        "PIVOT": TokenType.PIVOT,
+ 584        "PRECEDING": TokenType.PRECEDING,
+ 585        "PRIMARY KEY": TokenType.PRIMARY_KEY,
+ 586        "PROCEDURE": TokenType.PROCEDURE,
+ 587        "QUALIFY": TokenType.QUALIFY,
+ 588        "RANGE": TokenType.RANGE,
+ 589        "RECURSIVE": TokenType.RECURSIVE,
+ 590        "REGEXP": TokenType.RLIKE,
+ 591        "REPLACE": TokenType.REPLACE,
+ 592        "RESPECT NULLS": TokenType.RESPECT_NULLS,
+ 593        "REFERENCES": TokenType.REFERENCES,
+ 594        "RIGHT": TokenType.RIGHT,
+ 595        "RLIKE": TokenType.RLIKE,
+ 596        "ROLLBACK": TokenType.ROLLBACK,
+ 597        "ROLLUP": TokenType.ROLLUP,
+ 598        "ROW": TokenType.ROW,
+ 599        "ROWS": TokenType.ROWS,
+ 600        "SCHEMA": TokenType.SCHEMA,
+ 601        "SEED": TokenType.SEED,
+ 602        "SELECT": TokenType.SELECT,
+ 603        "SEMI": TokenType.SEMI,
+ 604        "SET": TokenType.SET,
+ 605        "SHOW": TokenType.SHOW,
+ 606        "SIMILAR TO": TokenType.SIMILAR_TO,
+ 607        "SOME": TokenType.SOME,
+ 608        "SORTKEY": TokenType.SORTKEY,
+ 609        "SORT BY": TokenType.SORT_BY,
+ 610        "TABLE": TokenType.TABLE,
+ 611        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
+ 612        "TEMP": TokenType.TEMPORARY,
+ 613        "TEMPORARY": TokenType.TEMPORARY,
+ 614        "THEN": TokenType.THEN,
+ 615        "TRUE": TokenType.TRUE,
+ 616        "TRAILING": TokenType.TRAILING,
+ 617        "UNBOUNDED": TokenType.UNBOUNDED,
+ 618        "UNION": TokenType.UNION,
+ 619        "UNLOGGED": TokenType.UNLOGGED,
+ 620        "UNNEST": TokenType.UNNEST,
+ 621        "UNPIVOT": TokenType.UNPIVOT,
+ 622        "UPDATE": TokenType.UPDATE,
+ 623        "USE": TokenType.USE,
+ 624        "USING": TokenType.USING,
+ 625        "VALUES": TokenType.VALUES,
+ 626        "VIEW": TokenType.VIEW,
+ 627        "VOLATILE": TokenType.VOLATILE,
+ 628        "WHEN": TokenType.WHEN,
+ 629        "WHERE": TokenType.WHERE,
+ 630        "WINDOW": TokenType.WINDOW,
+ 631        "WITH": TokenType.WITH,
+ 632        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
+ 633        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
+ 634        "WITHIN GROUP": TokenType.WITHIN_GROUP,
+ 635        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
+ 636        "APPLY": TokenType.APPLY,
+ 637        "ARRAY": TokenType.ARRAY,
+ 638        "BOOL": TokenType.BOOLEAN,
+ 639        "BOOLEAN": TokenType.BOOLEAN,
+ 640        "BYTE": TokenType.TINYINT,
+ 641        "TINYINT": TokenType.TINYINT,
+ 642        "SHORT": TokenType.SMALLINT,
+ 643        "SMALLINT": TokenType.SMALLINT,
+ 644        "INT2": TokenType.SMALLINT,
+ 645        "INTEGER": TokenType.INT,
+ 646        "INT": TokenType.INT,
+ 647        "INT4": TokenType.INT,
+ 648        "LONG": TokenType.BIGINT,
+ 649        "BIGINT": TokenType.BIGINT,
+ 650        "INT8": TokenType.BIGINT,
+ 651        "DECIMAL": TokenType.DECIMAL,
+ 652        "MAP": TokenType.MAP,
+ 653        "NULLABLE": TokenType.NULLABLE,
+ 654        "NUMBER": TokenType.DECIMAL,
+ 655        "NUMERIC": TokenType.DECIMAL,
+ 656        "FIXED": TokenType.DECIMAL,
+ 657        "REAL": TokenType.FLOAT,
+ 658        "FLOAT": TokenType.FLOAT,
+ 659        "FLOAT4": TokenType.FLOAT,
+ 660        "FLOAT8": TokenType.DOUBLE,
+ 661        "DOUBLE": TokenType.DOUBLE,
+ 662        "DOUBLE PRECISION": TokenType.DOUBLE,
+ 663        "JSON": TokenType.JSON,
+ 664        "CHAR": TokenType.CHAR,
+ 665        "NCHAR": TokenType.NCHAR,
+ 666        "VARCHAR": TokenType.VARCHAR,
+ 667        "VARCHAR2": TokenType.VARCHAR,
+ 668        "NVARCHAR": TokenType.NVARCHAR,
+ 669        "NVARCHAR2": TokenType.NVARCHAR,
+ 670        "STR": TokenType.TEXT,
+ 671        "STRING": TokenType.TEXT,
+ 672        "TEXT": TokenType.TEXT,
+ 673        "CLOB": TokenType.TEXT,
+ 674        "LONGVARCHAR": TokenType.TEXT,
+ 675        "BINARY": TokenType.BINARY,
+ 676        "BLOB": TokenType.VARBINARY,
+ 677        "BYTEA": TokenType.VARBINARY,
+ 678        "VARBINARY": TokenType.VARBINARY,
+ 679        "TIME": TokenType.TIME,
+ 680        "TIMESTAMP": TokenType.TIMESTAMP,
+ 681        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
+ 682        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
+ 683        "DATE": TokenType.DATE,
+ 684        "DATETIME": TokenType.DATETIME,
+ 685        "UNIQUE": TokenType.UNIQUE,
+ 686        "STRUCT": TokenType.STRUCT,
+ 687        "VARIANT": TokenType.VARIANT,
+ 688        "ALTER": TokenType.ALTER,
+ 689        "ALTER AGGREGATE": TokenType.COMMAND,
+ 690        "ALTER DEFAULT": TokenType.COMMAND,
+ 691        "ALTER DOMAIN": TokenType.COMMAND,
+ 692        "ALTER ROLE": TokenType.COMMAND,
+ 693        "ALTER RULE": TokenType.COMMAND,
+ 694        "ALTER SEQUENCE": TokenType.COMMAND,
+ 695        "ALTER TYPE": TokenType.COMMAND,
+ 696        "ALTER USER": TokenType.COMMAND,
+ 697        "ALTER VIEW": TokenType.COMMAND,
+ 698        "ANALYZE": TokenType.COMMAND,
+ 699        "CALL": TokenType.COMMAND,
+ 700        "COPY": TokenType.COMMAND,
+ 701        "EXPLAIN": TokenType.COMMAND,
+ 702        "OPTIMIZE": TokenType.COMMAND,
+ 703        "PREPARE": TokenType.COMMAND,
+ 704        "TRUNCATE": TokenType.COMMAND,
+ 705        "VACUUM": TokenType.COMMAND,
+ 706    }
+ 707
+ 708    WHITE_SPACE = {
+ 709        " ": TokenType.SPACE,
+ 710        "\t": TokenType.SPACE,
+ 711        "\n": TokenType.BREAK,
+ 712        "\r": TokenType.BREAK,
+ 713        "\r\n": TokenType.BREAK,
+ 714    }
+ 715
+ 716    COMMANDS = {
+ 717        TokenType.COMMAND,
+ 718        TokenType.EXECUTE,
+ 719        TokenType.FETCH,
+ 720        TokenType.SET,
+ 721        TokenType.SHOW,
+ 722    }
+ 723
+ 724    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
+ 725
+ 726    # handle numeric literals like in hive (3L = BIGINT)
+ 727    NUMERIC_LITERALS: t.Dict[str, str] = {}
+ 728    ENCODE: t.Optional[str] = None
+ 729
+ 730    COMMENTS = ["--", ("/*", "*/")]
+ 731    KEYWORD_TRIE = None  # autofilled
+ 732
+ 733    IDENTIFIER_CAN_START_WITH_DIGIT = False
+ 734
+ 735    __slots__ = (
+ 736        "sql",
+ 737        "size",
+ 738        "tokens",
+ 739        "_start",
+ 740        "_current",
+ 741        "_line",
+ 742        "_col",
+ 743        "_comments",
+ 744        "_char",
+ 745        "_end",
+ 746        "_peek",
+ 747        "_prev_token_line",
+ 748        "_prev_token_comments",
+ 749        "_prev_token_type",
+ 750        "_replace_backslash",
+ 751    )
+ 752
+ 753    def __init__(self) -> None:
+ 754        self._replace_backslash = "\\" in self._STRING_ESCAPES
+ 755        self.reset()
+ 756
+ 757    def reset(self) -> None:
+ 758        self.sql = ""
+ 759        self.size = 0
+ 760        self.tokens: t.List[Token] = []
+ 761        self._start = 0
+ 762        self._current = 0
+ 763        self._line = 1
+ 764        self._col = 1
+ 765        self._comments: t.List[str] = []
+ 766
+ 767        self._char = None
+ 768        self._end = None
+ 769        self._peek = None
+ 770        self._prev_token_line = -1
+ 771        self._prev_token_comments: t.List[str] = []
+ 772        self._prev_token_type = None
+ 773
+ 774    def tokenize(self, sql: str) -> t.List[Token]:
+ 775        """Returns a list of tokens corresponding to the SQL string `sql`."""
+ 776        self.reset()
+ 777        self.sql = sql
+ 778        self.size = len(sql)
+ 779        self._scan()
+ 780        return self.tokens
+ 781
+ 782    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
+ 783        while self.size and not self._end:
+ 784            self._start = self._current
+ 785            self._advance()
+ 786
+ 787            if not self._char:
+ 788                break
+ 789
+ 790            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
+ 791            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore
+ 792
+ 793            if white_space:
+ 794                if white_space == TokenType.BREAK:
+ 795                    self._col = 1
+ 796                    self._line += 1
+ 797            elif self._char.isdigit():  # type:ignore
+ 798                self._scan_number()
+ 799            elif identifier_end:
+ 800                self._scan_identifier(identifier_end)
+ 801            else:
+ 802                self._scan_keywords()
+ 803
+ 804            if until and until():
+ 805                break
+ 806
+ 807    def _chars(self, size: int) -> str:
+ 808        if size == 1:
+ 809            return self._char  # type: ignore
+ 810        start = self._current - 1
+ 811        end = start + size
+ 812        if end <= self.size:
+ 813            return self.sql[start:end]
+ 814        return ""
+ 815
+ 816    def _advance(self, i: int = 1) -> None:
+ 817        self._col += i
+ 818        self._current += i
+ 819        self._end = self._current >= self.size  # type: ignore
+ 820        self._char = self.sql[self._current - 1]  # type: ignore
+ 821        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore
+ 822
+ 823    @property
+ 824    def _text(self) -> str:
+ 825        return self.sql[self._start : self._current]
+ 826
+ 827    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
+ 828        self._prev_token_line = self._line
+ 829        self._prev_token_comments = self._comments
+ 830        self._prev_token_type = token_type  # type: ignore
+ 831        self.tokens.append(
+ 832            Token(
+ 833                token_type,
+ 834                self._text if text is None else text,
+ 835                self._line,
+ 836                self._col,
+ 837                self._comments,
+ 838            )
+ 839        )
+ 840        self._comments = []
+ 841
+ 842        # If we have either a semicolon or a begin token before the command's token, we'll parse
+ 843        # whatever follows the command's token as a string
+ 844        if token_type in self.COMMANDS and (
+ 845            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
+ 846        ):
+ 847            start = self._current
+ 848            tokens = len(self.tokens)
+ 849            self._scan(lambda: self._peek == ";")
+ 850            self.tokens = self.tokens[:tokens]
+ 851            text = self.sql[start : self._current].strip()
+ 852            if text:
+ 853                self._add(TokenType.STRING, text)
+ 854
+ 855    def _scan_keywords(self) -> None:
+ 856        size = 0
+ 857        word = None
+ 858        chars = self._text
+ 859        char = chars
+ 860        prev_space = False
+ 861        skip = False
+ 862        trie = self.KEYWORD_TRIE
+ 863
+ 864        while chars:
+ 865            if skip:
+ 866                result = 1
+ 867            else:
+ 868                result, trie = in_trie(trie, char.upper())  # type: ignore
+ 869
+ 870            if result == 0:
+ 871                break
+ 872            if result == 2:
+ 873                word = chars
+ 874            size += 1
+ 875            end = self._current - 1 + size
+ 876
+ 877            if end < self.size:
+ 878                char = self.sql[end]
+ 879                is_space = char in self.WHITE_SPACE
+ 880
+ 881                if not is_space or not prev_space:
+ 882                    if is_space:
+ 883                        char = " "
+ 884                    chars += char
+ 885                    prev_space = is_space
+ 886                    skip = False
+ 887                else:
+ 888                    skip = True
+ 889            else:
+ 890                chars = None  # type: ignore
+ 891
+ 892        if not word:
+ 893            if self._char in self.SINGLE_TOKENS:
+ 894                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
+ 895                return
+ 896            self._scan_var()
+ 897            return
+ 898
+ 899        if self._scan_string(word):
+ 900            return
+ 901        if self._scan_formatted_string(word):
+ 902            return
+ 903        if self._scan_comment(word):
+ 904            return
+ 905
+ 906        self._advance(size - 1)
+ 907        self._add(self.KEYWORDS[word.upper()])
+ 908
+ 909    def _scan_comment(self, comment_start: str) -> bool:
+ 910        if comment_start not in self._COMMENTS:  # type: ignore
+ 911            return False
+ 912
+ 913        comment_start_line = self._line
+ 914        comment_start_size = len(comment_start)
+ 915        comment_end = self._COMMENTS[comment_start]  # type: ignore
+ 916
+ 917        if comment_end:
+ 918            comment_end_size = len(comment_end)
+ 919
+ 920            while not self._end and self._chars(comment_end_size) != comment_end:
+ 921                self._advance()
+ 922
+ 923            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
+ 924            self._advance(comment_end_size - 1)
+ 925        else:
+ 926            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
+ 927                self._advance()
+ 928            self._comments.append(self._text[comment_start_size:])  # type: ignore
+ 929
+ 930        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
+ 931        # Multiple consecutive comments are preserved by appending them to the current comments list.
+ 932        if comment_start_line == self._prev_token_line:
+ 933            self.tokens[-1].comments.extend(self._comments)
+ 934            self._comments = []
+ 935
+ 936        return True
+ 937
+ 938    def _scan_number(self) -> None:
+ 939        if self._char == "0":
+ 940            peek = self._peek.upper()  # type: ignore
+ 941            if peek == "B":
+ 942                return self._scan_bits()
+ 943            elif peek == "X":
+ 944                return self._scan_hex()
+ 945
+ 946        decimal = False
+ 947        scientific = 0
+ 948
+ 949        while True:
+ 950            if self._peek.isdigit():  # type: ignore
+ 951                self._advance()
+ 952            elif self._peek == "." and not decimal:
+ 953                decimal = True
+ 954                self._advance()
+ 955            elif self._peek in ("-", "+") and scientific == 1:
+ 956                scientific += 1
+ 957                self._advance()
+ 958            elif self._peek.upper() == "E" and not scientific:  # type: ignore
+ 959                scientific += 1
+ 960                self._advance()
+ 961            elif self._peek.isidentifier():  # type: ignore
+ 962                number_text = self._text
+ 963                literal = []
+ 964
+ 965                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
+ 966                    literal.append(self._peek.upper())  # type: ignore
+ 967                    self._advance()
+ 968
+ 969                literal = "".join(literal)  # type: ignore
+ 970                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore
+ 971
+ 972                if token_type:
+ 973                    self._add(TokenType.NUMBER, number_text)
+ 974                    self._add(TokenType.DCOLON, "::")
+ 975                    return self._add(token_type, literal)  # type: ignore
+ 976                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
+ 977                    return self._add(TokenType.VAR)
+ 978
+ 979                self._add(TokenType.NUMBER, number_text)
+ 980                return self._advance(-len(literal))
+ 981            else:
+ 982                return self._add(TokenType.NUMBER)
+ 983
+ 984    def _scan_bits(self) -> None:
+ 985        self._advance()
+ 986        value = self._extract_value()
+ 987        try:
+ 988            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
+ 989        except ValueError:
+ 990            self._add(TokenType.IDENTIFIER)
+ 991
+ 992    def _scan_hex(self) -> None:
+ 993        self._advance()
+ 994        value = self._extract_value()
+ 995        try:
+ 996            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
+ 997        except ValueError:
+ 998            self._add(TokenType.IDENTIFIER)
+ 999
+1000    def _extract_value(self) -> str:
+1001        while True:
+1002            char = self._peek.strip()  # type: ignore
+1003            if char and char not in self.SINGLE_TOKENS:
+1004                self._advance()
+1005            else:
+1006                break
+1007
+1008        return self._text
+1009
+1010    def _scan_string(self, quote: str) -> bool:
+1011        quote_end = self._QUOTES.get(quote)  # type: ignore
+1012        if quote_end is None:
+1013            return False
+1014
+1015        self._advance(len(quote))
+1016        text = self._extract_string(quote_end)
+1017        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
+1018        text = text.replace("\\\\", "\\") if self._replace_backslash else text
+1019        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
+1020        return True
+1021
+1022    # X'1234, b'0110', E'\\\\\' etc.
+1023    def _scan_formatted_string(self, string_start: str) -> bool:
+1024        if string_start in self._HEX_STRINGS:  # type: ignore
+1025            delimiters = self._HEX_STRINGS  # type: ignore
+1026            token_type = TokenType.HEX_STRING
+1027            base = 16
+1028        elif string_start in self._BIT_STRINGS:  # type: ignore
+1029            delimiters = self._BIT_STRINGS  # type: ignore
+1030            token_type = TokenType.BIT_STRING
+1031            base = 2
+1032        elif string_start in self._BYTE_STRINGS:  # type: ignore
+1033            delimiters = self._BYTE_STRINGS  # type: ignore
+1034            token_type = TokenType.BYTE_STRING
+1035            base = None
+1036        else:
+1037            return False
+1038
+1039        self._advance(len(string_start))
+1040        string_end = delimiters.get(string_start)
+1041        text = self._extract_string(string_end)
+1042
+1043        if base is None:
+1044            self._add(token_type, text)
+1045        else:
+1046            try:
+1047                self._add(token_type, f"{int(text, base)}")
+1048            except:
+1049                raise RuntimeError(
+1050                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
+1051                )
+1052
+1053        return True
+1054
+1055    def _scan_identifier(self, identifier_end: str) -> None:
+1056        text = ""
+1057        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES
+1058
+1059        while True:
+1060            if self._end:
+1061                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")
+1062
+1063            self._advance()
+1064            if self._char == identifier_end:
+1065                if identifier_end_is_escape and self._peek == identifier_end:
+1066                    text += identifier_end  # type: ignore
+1067                    self._advance()
+1068                    continue
+1069
+1070                break
+1071
+1072            text += self._char  # type: ignore
+1073
+1074        self._add(TokenType.IDENTIFIER, text)
+1075
+1076    def _scan_var(self) -> None:
+1077        while True:
+1078            char = self._peek.strip()  # type: ignore
+1079            if char and char not in self.SINGLE_TOKENS:
+1080                self._advance()
+1081            else:
+1082                break
+1083        self._add(
+1084            TokenType.VAR
+1085            if self._prev_token_type == TokenType.PARAMETER
+1086            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
+1087        )
+1088
+1089    def _extract_string(self, delimiter: str) -> str:
+1090        text = ""
+1091        delim_size = len(delimiter)
+1092
+1093        while True:
+1094            if (
+1095                self._char in self._STRING_ESCAPES
+1096                and self._peek
+1097                and (self._peek == delimiter or self._peek in self._STRING_ESCAPES)
+1098            ):
+1099                text += self._peek
+1100                self._advance(2)
+1101            else:
+1102                if self._chars(delim_size) == delimiter:
+1103                    if delim_size > 1:
+1104                        self._advance(delim_size - 1)
+1105                    break
+1106
+1107                if self._end:
+1108                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
+1109                text += self._char  # type: ignore
+1110                self._advance()
+1111
+1112        return text
+
+ + +
+
+ +
+ + class + TokenType(sqlglot.helper.AutoName): + + + +
+ +
 11class TokenType(AutoName):
+ 12    L_PAREN = auto()
+ 13    R_PAREN = auto()
+ 14    L_BRACKET = auto()
+ 15    R_BRACKET = auto()
+ 16    L_BRACE = auto()
+ 17    R_BRACE = auto()
+ 18    COMMA = auto()
+ 19    DOT = auto()
+ 20    DASH = auto()
+ 21    PLUS = auto()
+ 22    COLON = auto()
+ 23    DCOLON = auto()
+ 24    SEMICOLON = auto()
+ 25    STAR = auto()
+ 26    BACKSLASH = auto()
+ 27    SLASH = auto()
+ 28    LT = auto()
+ 29    LTE = auto()
+ 30    GT = auto()
+ 31    GTE = auto()
+ 32    NOT = auto()
+ 33    EQ = auto()
+ 34    NEQ = auto()
+ 35    NULLSAFE_EQ = auto()
+ 36    AND = auto()
+ 37    OR = auto()
+ 38    AMP = auto()
+ 39    DPIPE = auto()
+ 40    PIPE = auto()
+ 41    CARET = auto()
+ 42    TILDA = auto()
+ 43    ARROW = auto()
+ 44    DARROW = auto()
+ 45    FARROW = auto()
+ 46    HASH = auto()
+ 47    HASH_ARROW = auto()
+ 48    DHASH_ARROW = auto()
+ 49    LR_ARROW = auto()
+ 50    DOLLAR = auto()
+ 51    PARAMETER = auto()
+ 52    SESSION_PARAMETER = auto()
+ 53    NATIONAL = auto()
+ 54
+ 55    BLOCK_START = auto()
+ 56    BLOCK_END = auto()
+ 57
+ 58    SPACE = auto()
+ 59    BREAK = auto()
+ 60
+ 61    STRING = auto()
+ 62    NUMBER = auto()
+ 63    IDENTIFIER = auto()
+ 64    COLUMN = auto()
+ 65    COLUMN_DEF = auto()
+ 66    SCHEMA = auto()
+ 67    TABLE = auto()
+ 68    VAR = auto()
+ 69    BIT_STRING = auto()
+ 70    HEX_STRING = auto()
+ 71    BYTE_STRING = auto()
+ 72
+ 73    # types
+ 74    BOOLEAN = auto()
+ 75    TINYINT = auto()
+ 76    SMALLINT = auto()
+ 77    INT = auto()
+ 78    BIGINT = auto()
+ 79    FLOAT = auto()
+ 80    DOUBLE = auto()
+ 81    DECIMAL = auto()
+ 82    CHAR = auto()
+ 83    NCHAR = auto()
+ 84    VARCHAR = auto()
+ 85    NVARCHAR = auto()
+ 86    TEXT = auto()
+ 87    MEDIUMTEXT = auto()
+ 88    LONGTEXT = auto()
+ 89    MEDIUMBLOB = auto()
+ 90    LONGBLOB = auto()
+ 91    BINARY = auto()
+ 92    VARBINARY = auto()
+ 93    JSON = auto()
+ 94    JSONB = auto()
+ 95    TIME = auto()
+ 96    TIMESTAMP = auto()
+ 97    TIMESTAMPTZ = auto()
+ 98    TIMESTAMPLTZ = auto()
+ 99    DATETIME = auto()
+100    DATE = auto()
+101    UUID = auto()
+102    GEOGRAPHY = auto()
+103    NULLABLE = auto()
+104    GEOMETRY = auto()
+105    HLLSKETCH = auto()
+106    HSTORE = auto()
+107    SUPER = auto()
+108    SERIAL = auto()
+109    SMALLSERIAL = auto()
+110    BIGSERIAL = auto()
+111    XML = auto()
+112    UNIQUEIDENTIFIER = auto()
+113    MONEY = auto()
+114    SMALLMONEY = auto()
+115    ROWVERSION = auto()
+116    IMAGE = auto()
+117    VARIANT = auto()
+118    OBJECT = auto()
+119
+120    # keywords
+121    ALIAS = auto()
+122    ALTER = auto()
+123    ALWAYS = auto()
+124    ALL = auto()
+125    ANTI = auto()
+126    ANY = auto()
+127    APPLY = auto()
+128    ARRAY = auto()
+129    ASC = auto()
+130    ASOF = auto()
+131    AT_TIME_ZONE = auto()
+132    AUTO_INCREMENT = auto()
+133    BEGIN = auto()
+134    BETWEEN = auto()
+135    BOTH = auto()
+136    BUCKET = auto()
+137    BY_DEFAULT = auto()
+138    CACHE = auto()
+139    CASCADE = auto()
+140    CASE = auto()
+141    CHARACTER_SET = auto()
+142    CHECK = auto()
+143    CLUSTER_BY = auto()
+144    COLLATE = auto()
+145    COMMAND = auto()
+146    COMMENT = auto()
+147    COMMIT = auto()
+148    COMPOUND = auto()
+149    CONSTRAINT = auto()
+150    CREATE = auto()
+151    CROSS = auto()
+152    CUBE = auto()
+153    CURRENT_DATE = auto()
+154    CURRENT_DATETIME = auto()
+155    CURRENT_ROW = auto()
+156    CURRENT_TIME = auto()
+157    CURRENT_TIMESTAMP = auto()
+158    DEFAULT = auto()
+159    DELETE = auto()
+160    DESC = auto()
+161    DESCRIBE = auto()
+162    DISTINCT = auto()
+163    DISTINCT_FROM = auto()
+164    DISTRIBUTE_BY = auto()
+165    DIV = auto()
+166    DROP = auto()
+167    ELSE = auto()
+168    ENCODE = auto()
+169    END = auto()
+170    ESCAPE = auto()
+171    EXCEPT = auto()
+172    EXECUTE = auto()
+173    EXISTS = auto()
+174    FALSE = auto()
+175    FETCH = auto()
+176    FILTER = auto()
+177    FINAL = auto()
+178    FIRST = auto()
+179    FOLLOWING = auto()
+180    FOR = auto()
+181    FOREIGN_KEY = auto()
+182    FORMAT = auto()
+183    FROM = auto()
+184    FULL = auto()
+185    FUNCTION = auto()
+186    GENERATED = auto()
+187    GLOB = auto()
+188    GLOBAL = auto()
+189    GROUP_BY = auto()
+190    GROUPING_SETS = auto()
+191    HAVING = auto()
+192    HINT = auto()
+193    IDENTITY = auto()
+194    IF = auto()
+195    IGNORE_NULLS = auto()
+196    ILIKE = auto()
+197    IN = auto()
+198    INDEX = auto()
+199    INNER = auto()
+200    INSERT = auto()
+201    INTERSECT = auto()
+202    INTERVAL = auto()
+203    INTO = auto()
+204    INTRODUCER = auto()
+205    IRLIKE = auto()
+206    IS = auto()
+207    ISNULL = auto()
+208    JOIN = auto()
+209    LANGUAGE = auto()
+210    LATERAL = auto()
+211    LAZY = auto()
+212    LEADING = auto()
+213    LEFT = auto()
+214    LIKE = auto()
+215    LIMIT = auto()
+216    LOAD_DATA = auto()
+217    LOCAL = auto()
+218    MAP = auto()
+219    MATCH_RECOGNIZE = auto()
+220    MATERIALIZED = auto()
+221    MERGE = auto()
+222    MOD = auto()
+223    NATURAL = auto()
+224    NEXT = auto()
+225    NO_ACTION = auto()
+226    NOTNULL = auto()
+227    NULL = auto()
+228    NULLS_FIRST = auto()
+229    NULLS_LAST = auto()
+230    OFFSET = auto()
+231    ON = auto()
+232    ONLY = auto()
+233    OPTIONS = auto()
+234    ORDER_BY = auto()
+235    ORDERED = auto()
+236    ORDINALITY = auto()
+237    OUTER = auto()
+238    OUT_OF = auto()
+239    OVER = auto()
+240    OVERWRITE = auto()
+241    PARTITION = auto()
+242    PARTITION_BY = auto()
+243    PERCENT = auto()
+244    PIVOT = auto()
+245    PLACEHOLDER = auto()
+246    PRECEDING = auto()
+247    PRIMARY_KEY = auto()
+248    PROCEDURE = auto()
+249    PROPERTIES = auto()
+250    PSEUDO_TYPE = auto()
+251    QUALIFY = auto()
+252    QUOTE = auto()
+253    RANGE = auto()
+254    RECURSIVE = auto()
+255    REPLACE = auto()
+256    RESPECT_NULLS = auto()
+257    REFERENCES = auto()
+258    RIGHT = auto()
+259    RLIKE = auto()
+260    ROLLBACK = auto()
+261    ROLLUP = auto()
+262    ROW = auto()
+263    ROWS = auto()
+264    SCHEMA_COMMENT = auto()
+265    SEED = auto()
+266    SELECT = auto()
+267    SEMI = auto()
+268    SEPARATOR = auto()
+269    SERDE_PROPERTIES = auto()
+270    SET = auto()
+271    SHOW = auto()
+272    SIMILAR_TO = auto()
+273    SOME = auto()
+274    SORTKEY = auto()
+275    SORT_BY = auto()
+276    STRUCT = auto()
+277    TABLE_SAMPLE = auto()
+278    TEMPORARY = auto()
+279    TOP = auto()
+280    THEN = auto()
+281    TRAILING = auto()
+282    TRUE = auto()
+283    UNBOUNDED = auto()
+284    UNCACHE = auto()
+285    UNION = auto()
+286    UNLOGGED = auto()
+287    UNNEST = auto()
+288    UNPIVOT = auto()
+289    UPDATE = auto()
+290    USE = auto()
+291    USING = auto()
+292    VALUES = auto()
+293    VIEW = auto()
+294    VOLATILE = auto()
+295    WHEN = auto()
+296    WHERE = auto()
+297    WINDOW = auto()
+298    WITH = auto()
+299    WITH_TIME_ZONE = auto()
+300    WITH_LOCAL_TIME_ZONE = auto()
+301    WITHIN_GROUP = auto()
+302    WITHOUT_TIME_ZONE = auto()
+303    UNIQUE = auto()
+
+ + +

An enumeration.

+
+ + +
+
+ L_PAREN = <TokenType.L_PAREN: 'L_PAREN'> + + +
+ + + + +
+
+
+ R_PAREN = <TokenType.R_PAREN: 'R_PAREN'> + + +
+ + + + +
+
+
+ L_BRACKET = <TokenType.L_BRACKET: 'L_BRACKET'> + + +
+ + + + +
+
+
+ R_BRACKET = <TokenType.R_BRACKET: 'R_BRACKET'> + + +
+ + + + +
+
+
+ L_BRACE = <TokenType.L_BRACE: 'L_BRACE'> + + +
+ + + + +
+
+
+ R_BRACE = <TokenType.R_BRACE: 'R_BRACE'> + + +
+ + + + +
+
+
+ COMMA = <TokenType.COMMA: 'COMMA'> + + +
+ + + + +
+
+
+ DOT = <TokenType.DOT: 'DOT'> + + +
+ + + + +
+
+
+ DASH = <TokenType.DASH: 'DASH'> + + +
+ + + + +
+
+
+ PLUS = <TokenType.PLUS: 'PLUS'> + + +
+ + + + +
+
+
+ COLON = <TokenType.COLON: 'COLON'> + + +
+ + + + +
+
+
+ DCOLON = <TokenType.DCOLON: 'DCOLON'> + + +
+ + + + +
+
+
+ SEMICOLON = <TokenType.SEMICOLON: 'SEMICOLON'> + + +
+ + + + +
+
+
+ STAR = <TokenType.STAR: 'STAR'> + + +
+ + + + +
+
+
+ BACKSLASH = <TokenType.BACKSLASH: 'BACKSLASH'> + + +
+ + + + +
+
+
+ SLASH = <TokenType.SLASH: 'SLASH'> + + +
+ + + + +
+
+
+ LT = <TokenType.LT: 'LT'> + + +
+ + + + +
+
+
+ LTE = <TokenType.LTE: 'LTE'> + + +
+ + + + +
+
+
+ GT = <TokenType.GT: 'GT'> + + +
+ + + + +
+
+
+ GTE = <TokenType.GTE: 'GTE'> + + +
+ + + + +
+
+
+ NOT = <TokenType.NOT: 'NOT'> + + +
+ + + + +
+
+
+ EQ = <TokenType.EQ: 'EQ'> + + +
+ + + + +
+
+
+ NEQ = <TokenType.NEQ: 'NEQ'> + + +
+ + + + +
+
+
+ NULLSAFE_EQ = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'> + + +
+ + + + +
+
+
+ AND = <TokenType.AND: 'AND'> + + +
+ + + + +
+
+
+ OR = <TokenType.OR: 'OR'> + + +
+ + + + +
+
+
+ AMP = <TokenType.AMP: 'AMP'> + + +
+ + + + +
+
+
+ DPIPE = <TokenType.DPIPE: 'DPIPE'> + + +
+ + + + +
+
+
+ PIPE = <TokenType.PIPE: 'PIPE'> + + +
+ + + + +
+
+
+ CARET = <TokenType.CARET: 'CARET'> + + +
+ + + + +
+
+
+ TILDA = <TokenType.TILDA: 'TILDA'> + + +
+ + + + +
+
+
+ ARROW = <TokenType.ARROW: 'ARROW'> + + +
+ + + + +
+
+
+ DARROW = <TokenType.DARROW: 'DARROW'> + + +
+ + + + +
+
+
+ FARROW = <TokenType.FARROW: 'FARROW'> + + +
+ + + + +
+
+
+ HASH = <TokenType.HASH: 'HASH'> + + +
+ + + + +
+
+
+ HASH_ARROW = <TokenType.HASH_ARROW: 'HASH_ARROW'> + + +
+ + + + +
+
+
+ DHASH_ARROW = <TokenType.DHASH_ARROW: 'DHASH_ARROW'> + + +
+ + + + +
+
+
+ LR_ARROW = <TokenType.LR_ARROW: 'LR_ARROW'> + + +
+ + + + +
+
+
+ DOLLAR = <TokenType.DOLLAR: 'DOLLAR'> + + +
+ + + + +
+
+
+ PARAMETER = <TokenType.PARAMETER: 'PARAMETER'> + + +
+ + + + +
+
+
+ SESSION_PARAMETER = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'> + + +
+ + + + +
+
+
+ NATIONAL = <TokenType.NATIONAL: 'NATIONAL'> + + +
+ + + + +
+
+
+ BLOCK_START = <TokenType.BLOCK_START: 'BLOCK_START'> + + +
+ + + + +
+
+
+ BLOCK_END = <TokenType.BLOCK_END: 'BLOCK_END'> + + +
+ + + + +
+
+
+ SPACE = <TokenType.SPACE: 'SPACE'> + + +
+ + + + +
+
+
+ BREAK = <TokenType.BREAK: 'BREAK'> + + +
+ + + + +
+
+
+ STRING = <TokenType.STRING: 'STRING'> + + +
+ + + + +
+
+
+ NUMBER = <TokenType.NUMBER: 'NUMBER'> + + +
+ + + + +
+
+
+ IDENTIFIER = <TokenType.IDENTIFIER: 'IDENTIFIER'> + + +
+ + + + +
+
+
+ COLUMN = <TokenType.COLUMN: 'COLUMN'> + + +
+ + + + +
+
+
+ COLUMN_DEF = <TokenType.COLUMN_DEF: 'COLUMN_DEF'> + + +
+ + + + +
+
+
+ SCHEMA = <TokenType.SCHEMA: 'SCHEMA'> + + +
+ + + + +
+
+
+ TABLE = <TokenType.TABLE: 'TABLE'> + + +
+ + + + +
+
+
+ VAR = <TokenType.VAR: 'VAR'> + + +
+ + + + +
+
+
+ BIT_STRING = <TokenType.BIT_STRING: 'BIT_STRING'> + + +
+ + + + +
+
+
+ HEX_STRING = <TokenType.HEX_STRING: 'HEX_STRING'> + + +
+ + + + +
+
+
+ BYTE_STRING = <TokenType.BYTE_STRING: 'BYTE_STRING'> + + +
+ + + + +
+
+
+ BOOLEAN = <TokenType.BOOLEAN: 'BOOLEAN'> + + +
+ + + + +
+
+
+ TINYINT = <TokenType.TINYINT: 'TINYINT'> + + +
+ + + + +
+
+
+ SMALLINT = <TokenType.SMALLINT: 'SMALLINT'> + + +
+ + + + +
+
+
+ INT = <TokenType.INT: 'INT'> + + +
+ + + + +
+
+
+ BIGINT = <TokenType.BIGINT: 'BIGINT'> + + +
+ + + + +
+
+
+ FLOAT = <TokenType.FLOAT: 'FLOAT'> + + +
+ + + + +
+
+
+ DOUBLE = <TokenType.DOUBLE: 'DOUBLE'> + + +
+ + + + +
+
+
+ DECIMAL = <TokenType.DECIMAL: 'DECIMAL'> + + +
+ + + + +
+
+
+ CHAR = <TokenType.CHAR: 'CHAR'> + + +
+ + + + +
+
+
+ NCHAR = <TokenType.NCHAR: 'NCHAR'> + + +
+ + + + +
+
+
+ VARCHAR = <TokenType.VARCHAR: 'VARCHAR'> + + +
+ + + + +
+
+
+ NVARCHAR = <TokenType.NVARCHAR: 'NVARCHAR'> + + +
+ + + + +
+
+
+ TEXT = <TokenType.TEXT: 'TEXT'> + + +
+ + + + +
+
+
+ MEDIUMTEXT = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'> + + +
+ + + + +
+
+
+ LONGTEXT = <TokenType.LONGTEXT: 'LONGTEXT'> + + +
+ + + + +
+
+
+ MEDIUMBLOB = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'> + + +
+ + + + +
+
+
+ LONGBLOB = <TokenType.LONGBLOB: 'LONGBLOB'> + + +
+ + + + +
+
+
+ BINARY = <TokenType.BINARY: 'BINARY'> + + +
+ + + + +
+
+
+ VARBINARY = <TokenType.VARBINARY: 'VARBINARY'> + + +
+ + + + +
+
+
+ JSON = <TokenType.JSON: 'JSON'> + + +
+ + + + +
+
+
+ JSONB = <TokenType.JSONB: 'JSONB'> + + +
+ + + + +
+
+
+ TIME = <TokenType.TIME: 'TIME'> + + +
+ + + + +
+
+
+ TIMESTAMP = <TokenType.TIMESTAMP: 'TIMESTAMP'> + + +
+ + + + +
+
+
+ TIMESTAMPTZ = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'> + + +
+ + + + +
+
+
+ TIMESTAMPLTZ = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'> + + +
+ + + + +
+
+
+ DATETIME = <TokenType.DATETIME: 'DATETIME'> + + +
+ + + + +
+
+
+ DATE = <TokenType.DATE: 'DATE'> + + +
+ + + + +
+
+
+ UUID = <TokenType.UUID: 'UUID'> + + +
+ + + + +
+
+
+ GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'> + + +
+ + + + +
+
+
+ NULLABLE = <TokenType.NULLABLE: 'NULLABLE'> + + +
+ + + + +
+
+
+ GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'> + + +
+ + + + +
+
+
+ HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'> + + +
+ + + + +
+
+
+ HSTORE = <TokenType.HSTORE: 'HSTORE'> + + +
+ + + + +
+
+
+ SUPER = <TokenType.SUPER: 'SUPER'> + + +
+ + + + +
+
+
+ SERIAL = <TokenType.SERIAL: 'SERIAL'> + + +
+ + + + +
+
+
+ SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'> + + +
+ + + + +
+
+
+ BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'> + + +
+ + + + +
+
+
+ XML = <TokenType.XML: 'XML'> + + +
+ + + + +
+
+
+ UNIQUEIDENTIFIER = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'> + + +
+ + + + +
+
+
+ MONEY = <TokenType.MONEY: 'MONEY'> + + +
+ + + + +
+
+
+ SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'> + + +
+ + + + +
+
+
+ ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'> + + +
+ + + + +
+
+
+ IMAGE = <TokenType.IMAGE: 'IMAGE'> + + +
+ + + + +
+
+
+ VARIANT = <TokenType.VARIANT: 'VARIANT'> + + +
+ + + + +
+
+
+ OBJECT = <TokenType.OBJECT: 'OBJECT'> + + +
+ + + + +
+
+
+ ALIAS = <TokenType.ALIAS: 'ALIAS'> + + +
+ + + + +
+
+
+ ALTER = <TokenType.ALTER: 'ALTER'> + + +
+ + + + +
+
+
+ ALWAYS = <TokenType.ALWAYS: 'ALWAYS'> + + +
+ + + + +
+
+
+ ALL = <TokenType.ALL: 'ALL'> + + +
+ + + + +
+
+
+ ANTI = <TokenType.ANTI: 'ANTI'> + + +
+ + + + +
+
+
+ ANY = <TokenType.ANY: 'ANY'> + + +
+ + + + +
+
+
+ APPLY = <TokenType.APPLY: 'APPLY'> + + +
+ + + + +
+
+
+ ARRAY = <TokenType.ARRAY: 'ARRAY'> + + +
+ + + + +
+
+
+ ASC = <TokenType.ASC: 'ASC'> + + +
+ + + + +
+
+
+ ASOF = <TokenType.ASOF: 'ASOF'> + + +
+ + + + +
+
+
+ AT_TIME_ZONE = <TokenType.AT_TIME_ZONE: 'AT_TIME_ZONE'> + + +
+ + + + +
+
+
+ AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'> + + +
+ + + + +
+
+
+ BEGIN = <TokenType.BEGIN: 'BEGIN'> + + +
+ + + + +
+
+
+ BETWEEN = <TokenType.BETWEEN: 'BETWEEN'> + + +
+ + + + +
+
+
+ BOTH = <TokenType.BOTH: 'BOTH'> + + +
+ + + + +
+
+
+ BUCKET = <TokenType.BUCKET: 'BUCKET'> + + +
+ + + + +
+
+
+ BY_DEFAULT = <TokenType.BY_DEFAULT: 'BY_DEFAULT'> + + +
+ + + + +
+
+
+ CACHE = <TokenType.CACHE: 'CACHE'> + + +
+ + + + +
+
+
+ CASCADE = <TokenType.CASCADE: 'CASCADE'> + + +
+ + + + +
+
+
+ CASE = <TokenType.CASE: 'CASE'> + + +
+ + + + +
+
+
+ CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'> + + +
+ + + + +
+
+
+ CHECK = <TokenType.CHECK: 'CHECK'> + + +
+ + + + +
+
+
+ CLUSTER_BY = <TokenType.CLUSTER_BY: 'CLUSTER_BY'> + + +
+ + + + +
+
+
+ COLLATE = <TokenType.COLLATE: 'COLLATE'> + + +
+ + + + +
+
+
+ COMMAND = <TokenType.COMMAND: 'COMMAND'> + + +
+ + + + +
+
+
+ COMMENT = <TokenType.COMMENT: 'COMMENT'> + + +
+ + + + +
+
+
+ COMMIT = <TokenType.COMMIT: 'COMMIT'> + + +
+ + + + +
+
+
+ COMPOUND = <TokenType.COMPOUND: 'COMPOUND'> + + +
+ + + + +
+
+
+ CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'> + + +
+ + + + +
+
+
+ CREATE = <TokenType.CREATE: 'CREATE'> + + +
+ + + + +
+
+
+ CROSS = <TokenType.CROSS: 'CROSS'> + + +
+ + + + +
+
+
+ CUBE = <TokenType.CUBE: 'CUBE'> + + +
+ + + + +
+
+
+ CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'> + + +
+ + + + +
+
+
+ CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'> + + +
+ + + + +
+
+
+ CURRENT_ROW = <TokenType.CURRENT_ROW: 'CURRENT_ROW'> + + +
+ + + + +
+
+
+ CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'> + + +
+ + + + +
+
+
+ CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'> + + +
+ + + + +
+
+
+ DEFAULT = <TokenType.DEFAULT: 'DEFAULT'> + + +
+ + + + +
+
+
+ DELETE = <TokenType.DELETE: 'DELETE'> + + +
+ + + + +
+
+
+ DESC = <TokenType.DESC: 'DESC'> + + +
+ + + + +
+
+
+ DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'> + + +
+ + + + +
+
+
+ DISTINCT = <TokenType.DISTINCT: 'DISTINCT'> + + +
+ + + + +
+
+
+ DISTINCT_FROM = <TokenType.DISTINCT_FROM: 'DISTINCT_FROM'> + + +
+ + + + +
+
+
+ DISTRIBUTE_BY = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'> + + +
+ + + + +
+
+
+ DIV = <TokenType.DIV: 'DIV'> + + +
+ + + + +
+
+
+ DROP = <TokenType.DROP: 'DROP'> + + +
+ + + + +
+
+
+ ELSE = <TokenType.ELSE: 'ELSE'> + + +
+ + + + +
+
+
+ ENCODE = <TokenType.ENCODE: 'ENCODE'> + + +
+ + + + +
+
+
+ END = <TokenType.END: 'END'> + + +
+ + + + +
+
+
+ ESCAPE = <TokenType.ESCAPE: 'ESCAPE'> + + +
+ + + + +
+
+
+ EXCEPT = <TokenType.EXCEPT: 'EXCEPT'> + + +
+ + + + +
+
+
+ EXECUTE = <TokenType.EXECUTE: 'EXECUTE'> + + +
+ + + + +
+
+
+ EXISTS = <TokenType.EXISTS: 'EXISTS'> + + +
+ + + + +
+
+
+ FALSE = <TokenType.FALSE: 'FALSE'> + + +
+ + + + +
+
+
+ FETCH = <TokenType.FETCH: 'FETCH'> + + +
+ + + + +
+
+
+ FILTER = <TokenType.FILTER: 'FILTER'> + + +
+ + + + +
+
+
+ FINAL = <TokenType.FINAL: 'FINAL'> + + +
+ + + + +
+
+
+ FIRST = <TokenType.FIRST: 'FIRST'> + + +
+ + + + +
+
+
+ FOLLOWING = <TokenType.FOLLOWING: 'FOLLOWING'> + + +
+ + + + +
+
+
+ FOR = <TokenType.FOR: 'FOR'> + + +
+ + + + +
+
+
+ FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'> + + +
+ + + + +
+
+
+ FORMAT = <TokenType.FORMAT: 'FORMAT'> + + +
+ + + + +
+
+
+ FROM = <TokenType.FROM: 'FROM'> + + +
+ + + + +
+
+
+ FULL = <TokenType.FULL: 'FULL'> + + +
+ + + + +
+
+
+ FUNCTION = <TokenType.FUNCTION: 'FUNCTION'> + + +
+ + + + +
+
+
+ GENERATED = <TokenType.GENERATED: 'GENERATED'> + + +
+ + + + +
+
+
+ GLOB = <TokenType.GLOB: 'GLOB'> + + +
+ + + + +
+
+
+ GLOBAL = <TokenType.GLOBAL: 'GLOBAL'> + + +
+ + + + +
+
+
+ GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'> + + +
+ + + + +
+
+
+ GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'> + + +
+ + + + +
+
+
+ HAVING = <TokenType.HAVING: 'HAVING'> + + +
+ + + + +
+
+
+ HINT = <TokenType.HINT: 'HINT'> + + +
+ + + + +
+
+
+ IDENTITY = <TokenType.IDENTITY: 'IDENTITY'> + + +
+ + + + +
+
+
+ IF = <TokenType.IF: 'IF'> + + +
+ + + + +
+
+
+ IGNORE_NULLS = <TokenType.IGNORE_NULLS: 'IGNORE_NULLS'> + + +
+ + + + +
+
+
+ ILIKE = <TokenType.ILIKE: 'ILIKE'> + + +
+ + + + +
+
+
+ IN = <TokenType.IN: 'IN'> + + +
+ + + + +
+
+
+ INDEX = <TokenType.INDEX: 'INDEX'> + + +
+ + + + +
+
+
+ INNER = <TokenType.INNER: 'INNER'> + + +
+ + + + +
+
+
+ INSERT = <TokenType.INSERT: 'INSERT'> + + +
+ + + + +
+
+
+ INTERSECT = <TokenType.INTERSECT: 'INTERSECT'> + + +
+ + + + +
+
+
+ INTERVAL = <TokenType.INTERVAL: 'INTERVAL'> + + +
+ + + + +
+
+
+ INTO = <TokenType.INTO: 'INTO'> + + +
+ + + + +
+
+
+ INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'> + + +
+ + + + +
+
+
+ IRLIKE = <TokenType.IRLIKE: 'IRLIKE'> + + +
+ + + + +
+
+
+ IS = <TokenType.IS: 'IS'> + + +
+ + + + +
+
+
+ ISNULL = <TokenType.ISNULL: 'ISNULL'> + + +
+ + + + +
+
+
+ JOIN = <TokenType.JOIN: 'JOIN'> + + +
+ + + + +
+
+
+ LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'> + + +
+ + + + +
+
+
+ LATERAL = <TokenType.LATERAL: 'LATERAL'> + + +
+ + + + +
+
+
+ LAZY = <TokenType.LAZY: 'LAZY'> + + +
+ + + + +
+
+
+ LEADING = <TokenType.LEADING: 'LEADING'> + + +
+ + + + +
+
+
+ LEFT = <TokenType.LEFT: 'LEFT'> + + +
+ + + + +
+
+
+ LIKE = <TokenType.LIKE: 'LIKE'> + + +
+ + + + +
+
+
+ LIMIT = <TokenType.LIMIT: 'LIMIT'> + + +
+ + + + +
+
+
+ LOAD_DATA = <TokenType.LOAD_DATA: 'LOAD_DATA'> + + +
+ + + + +
+
+
+ LOCAL = <TokenType.LOCAL: 'LOCAL'> + + +
+ + + + +
+
+
+ MAP = <TokenType.MAP: 'MAP'> + + +
+ + + + +
+
+
+ MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'> + + +
+ + + + +
+
+
+ MATERIALIZED = <TokenType.MATERIALIZED: 'MATERIALIZED'> + + +
+ + + + +
+
+
+ MERGE = <TokenType.MERGE: 'MERGE'> + + +
+ + + + +
+
+
+ MOD = <TokenType.MOD: 'MOD'> + + +
+ + + + +
+
+
+ NATURAL = <TokenType.NATURAL: 'NATURAL'> + + +
+ + + + +
+
+
+ NEXT = <TokenType.NEXT: 'NEXT'> + + +
+ + + + +
+
+
+ NO_ACTION = <TokenType.NO_ACTION: 'NO_ACTION'> + + +
+ + + + +
+
+
+ NOTNULL = <TokenType.NOTNULL: 'NOTNULL'> + + +
+ + + + +
+
+
+ NULL = <TokenType.NULL: 'NULL'> + + +
+ + + + +
+
+
+ NULLS_FIRST = <TokenType.NULLS_FIRST: 'NULLS_FIRST'> + + +
+ + + + +
+
+
+ NULLS_LAST = <TokenType.NULLS_LAST: 'NULLS_LAST'> + + +
+ + + + +
+
+
+ OFFSET = <TokenType.OFFSET: 'OFFSET'> + + +
+ + + + +
+
+
+ ON = <TokenType.ON: 'ON'> + + +
+ + + + +
+
+
+ ONLY = <TokenType.ONLY: 'ONLY'> + + +
+ + + + +
+
+
+ OPTIONS = <TokenType.OPTIONS: 'OPTIONS'> + + +
+ + + + +
+
+
+ ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'> + + +
+ + + + +
+
+
+ ORDERED = <TokenType.ORDERED: 'ORDERED'> + + +
+ + + + +
+
+
+ ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'> + + +
+ + + + +
+
+
+ OUTER = <TokenType.OUTER: 'OUTER'> + + +
+ + + + +
+
+
+ OUT_OF = <TokenType.OUT_OF: 'OUT_OF'> + + +
+ + + + +
+
+
+ OVER = <TokenType.OVER: 'OVER'> + + +
+ + + + +
+
+
+ OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'> + + +
+ + + + +
+
+
+ PARTITION = <TokenType.PARTITION: 'PARTITION'> + + +
+ + + + +
+
+
+ PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'> + + +
+ + + + +
+
+
+ PERCENT = <TokenType.PERCENT: 'PERCENT'> + + +
+ + + + +
+
+
+ PIVOT = <TokenType.PIVOT: 'PIVOT'> + + +
+ + + + +
+
+
+ PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'> + + +
+ + + + +
+
+
+ PRECEDING = <TokenType.PRECEDING: 'PRECEDING'> + + +
+ + + + +
+
+
+ PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'> + + +
+ + + + +
+
+
+ PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'> + + +
+ + + + +
+
+
+ PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'> + + +
+ + + + +
+
+
+ PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'> + + +
+ + + + +
+
+
+ QUALIFY = <TokenType.QUALIFY: 'QUALIFY'> + + +
+ + + + +
+
+
+ QUOTE = <TokenType.QUOTE: 'QUOTE'> + + +
+ + + + +
+
+
+ RANGE = <TokenType.RANGE: 'RANGE'> + + +
+ + + + +
+
+
+ RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'> + + +
+ + + + +
+
+
+ REPLACE = <TokenType.REPLACE: 'REPLACE'> + + +
+ + + + +
+
+
+ RESPECT_NULLS = <TokenType.RESPECT_NULLS: 'RESPECT_NULLS'> + + +
+ + + + +
+
+
+ REFERENCES = <TokenType.REFERENCES: 'REFERENCES'> + + +
+ + + + +
+
+
+ RIGHT = <TokenType.RIGHT: 'RIGHT'> + + +
+ + + + +
+
+
+ RLIKE = <TokenType.RLIKE: 'RLIKE'> + + +
+ + + + +
+
+
+ ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'> + + +
+ + + + +
+
+
+ ROLLUP = <TokenType.ROLLUP: 'ROLLUP'> + + +
+ + + + +
+
+
+ ROW = <TokenType.ROW: 'ROW'> + + +
+ + + + +
+
+
+ ROWS = <TokenType.ROWS: 'ROWS'> + + +
+ + + + +
+
+
+ SCHEMA_COMMENT = <TokenType.SCHEMA_COMMENT: 'SCHEMA_COMMENT'> + + +
+ + + + +
+
+
+ SEED = <TokenType.SEED: 'SEED'> + + +
+ + + + +
+
+
+ SELECT = <TokenType.SELECT: 'SELECT'> + + +
+ + + + +
+
+
+ SEMI = <TokenType.SEMI: 'SEMI'> + + +
+ + + + +
+
+
+ SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'> + + +
+ + + + +
+
+
+ SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'> + + +
+ + + + +
+
+
+ SET = <TokenType.SET: 'SET'> + + +
+ + + + +
+
+
+ SHOW = <TokenType.SHOW: 'SHOW'> + + +
+ + + + +
+
+
+ SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'> + + +
+ + + + +
+
+
+ SOME = <TokenType.SOME: 'SOME'> + + +
+ + + + +
+
+
+ SORTKEY = <TokenType.SORTKEY: 'SORTKEY'> + + +
+ + + + +
+
+
+ SORT_BY = <TokenType.SORT_BY: 'SORT_BY'> + + +
+ + + + +
+
+
+ STRUCT = <TokenType.STRUCT: 'STRUCT'> + + +
+ + + + +
+
+
+ TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'> + + +
+ + + + +
+
+
+ TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'> + + +
+ + + + +
+
+
+ TOP = <TokenType.TOP: 'TOP'> + + +
+ + + + +
+
+
+ THEN = <TokenType.THEN: 'THEN'> + + +
+ + + + +
+
+
+ TRAILING = <TokenType.TRAILING: 'TRAILING'> + + +
+ + + + +
+
+
+ TRUE = <TokenType.TRUE: 'TRUE'> + + +
+ + + + +
+
+
+ UNBOUNDED = <TokenType.UNBOUNDED: 'UNBOUNDED'> + + +
+ + + + +
+
+
+ UNCACHE = <TokenType.UNCACHE: 'UNCACHE'> + + +
+ + + + +
+
+
+ UNION = <TokenType.UNION: 'UNION'> + + +
+ + + + +
+
+
+ UNLOGGED = <TokenType.UNLOGGED: 'UNLOGGED'> + + +
+ + + + +
+
+
+ UNNEST = <TokenType.UNNEST: 'UNNEST'> + + +
+ + + + +
+
+
+ UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'> + + +
+ + + + +
+
+
+ UPDATE = <TokenType.UPDATE: 'UPDATE'> + + +
+ + + + +
+
+
+ USE = <TokenType.USE: 'USE'> + + +
+ + + + +
+
+
+ USING = <TokenType.USING: 'USING'> + + +
+ + + + +
+
+
+ VALUES = <TokenType.VALUES: 'VALUES'> + + +
+ + + + +
+
+
+ VIEW = <TokenType.VIEW: 'VIEW'> + + +
+ + + + +
+
+
+ VOLATILE = <TokenType.VOLATILE: 'VOLATILE'> + + +
+ + + + +
+
+
+ WHEN = <TokenType.WHEN: 'WHEN'> + + +
+ + + + +
+
+
+ WHERE = <TokenType.WHERE: 'WHERE'> + + +
+ + + + +
+
+
+ WINDOW = <TokenType.WINDOW: 'WINDOW'> + + +
+ + + + +
+
+
+ WITH = <TokenType.WITH: 'WITH'> + + +
+ + + + +
+
+
+ WITH_TIME_ZONE = <TokenType.WITH_TIME_ZONE: 'WITH_TIME_ZONE'> + + +
+ + + + +
+
+
+ WITH_LOCAL_TIME_ZONE = <TokenType.WITH_LOCAL_TIME_ZONE: 'WITH_LOCAL_TIME_ZONE'> + + +
+ + + + +
+
+
+ WITHIN_GROUP = <TokenType.WITHIN_GROUP: 'WITHIN_GROUP'> + + +
+ + + + +
+
+
+ WITHOUT_TIME_ZONE = <TokenType.WITHOUT_TIME_ZONE: 'WITHOUT_TIME_ZONE'> + + +
+ + + + +
+
+
+ UNIQUE = <TokenType.UNIQUE: 'UNIQUE'> + + +
+ + + + +
+
+
Inherited Members
+
+
enum.Enum
+
name
+
value
+ +
+
+
+
+
+ +
+ + class + Token: + + + +
+ +
306class Token:
+307    __slots__ = ("token_type", "text", "line", "col", "comments")
+308
+309    @classmethod
+310    def number(cls, number: int) -> Token:
+311        """Returns a NUMBER token with `number` as its text."""
+312        return cls(TokenType.NUMBER, str(number))
+313
+314    @classmethod
+315    def string(cls, string: str) -> Token:
+316        """Returns a STRING token with `string` as its text."""
+317        return cls(TokenType.STRING, string)
+318
+319    @classmethod
+320    def identifier(cls, identifier: str) -> Token:
+321        """Returns an IDENTIFIER token with `identifier` as its text."""
+322        return cls(TokenType.IDENTIFIER, identifier)
+323
+324    @classmethod
+325    def var(cls, var: str) -> Token:
+326        """Returns an VAR token with `var` as its text."""
+327        return cls(TokenType.VAR, var)
+328
+329    def __init__(
+330        self,
+331        token_type: TokenType,
+332        text: str,
+333        line: int = 1,
+334        col: int = 1,
+335        comments: t.List[str] = [],
+336    ) -> None:
+337        self.token_type = token_type
+338        self.text = text
+339        self.line = line
+340        self.col = max(col - len(text), 1)
+341        self.comments = comments
+342
+343    def __repr__(self) -> str:
+344        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
+345        return f"<Token {attributes}>"
+
+ + + + +
+ +
+ + Token( token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, comments: List[str] = []) + + + +
+ +
329    def __init__(
+330        self,
+331        token_type: TokenType,
+332        text: str,
+333        line: int = 1,
+334        col: int = 1,
+335        comments: t.List[str] = [],
+336    ) -> None:
+337        self.token_type = token_type
+338        self.text = text
+339        self.line = line
+340        self.col = max(col - len(text), 1)
+341        self.comments = comments
+
+ + + + +
+
+ +
+
@classmethod
+ + def + number(cls, number: int) -> sqlglot.tokens.Token: + + + +
+ +
309    @classmethod
+310    def number(cls, number: int) -> Token:
+311        """Returns a NUMBER token with `number` as its text."""
+312        return cls(TokenType.NUMBER, str(number))
+
+ + +

Returns a NUMBER token with number as its text.

+
+ + +
+
+ +
+
@classmethod
+ + def + string(cls, string: str) -> sqlglot.tokens.Token: + + + +
+ +
314    @classmethod
+315    def string(cls, string: str) -> Token:
+316        """Returns a STRING token with `string` as its text."""
+317        return cls(TokenType.STRING, string)
+
+ + +

Returns a STRING token with string as its text.

+
+ + +
+
+ +
+
@classmethod
+ + def + identifier(cls, identifier: str) -> sqlglot.tokens.Token: + + + +
+ +
319    @classmethod
+320    def identifier(cls, identifier: str) -> Token:
+321        """Returns an IDENTIFIER token with `identifier` as its text."""
+322        return cls(TokenType.IDENTIFIER, identifier)
+
+ + +

Returns an IDENTIFIER token with identifier as its text.

+
+ + +
+
+ +
+
@classmethod
+ + def + var(cls, var: str) -> sqlglot.tokens.Token: + + + +
+ +
324    @classmethod
+325    def var(cls, var: str) -> Token:
+326        """Returns an VAR token with `var` as its text."""
+327        return cls(TokenType.VAR, var)
+
+ + +

Returns an VAR token with var as its text.

+
+ + +
+
+
+ +
+ + class + Tokenizer: + + + +
+ +
 388class Tokenizer(metaclass=_Tokenizer):
+ 389    SINGLE_TOKENS = {
+ 390        "(": TokenType.L_PAREN,
+ 391        ")": TokenType.R_PAREN,
+ 392        "[": TokenType.L_BRACKET,
+ 393        "]": TokenType.R_BRACKET,
+ 394        "{": TokenType.L_BRACE,
+ 395        "}": TokenType.R_BRACE,
+ 396        "&": TokenType.AMP,
+ 397        "^": TokenType.CARET,
+ 398        ":": TokenType.COLON,
+ 399        ",": TokenType.COMMA,
+ 400        ".": TokenType.DOT,
+ 401        "-": TokenType.DASH,
+ 402        "=": TokenType.EQ,
+ 403        ">": TokenType.GT,
+ 404        "<": TokenType.LT,
+ 405        "%": TokenType.MOD,
+ 406        "!": TokenType.NOT,
+ 407        "|": TokenType.PIPE,
+ 408        "+": TokenType.PLUS,
+ 409        ";": TokenType.SEMICOLON,
+ 410        "/": TokenType.SLASH,
+ 411        "\\": TokenType.BACKSLASH,
+ 412        "*": TokenType.STAR,
+ 413        "~": TokenType.TILDA,
+ 414        "?": TokenType.PLACEHOLDER,
+ 415        "@": TokenType.PARAMETER,
+ 416        # used for breaking a var like x'y' but nothing else
+ 417        # the token type doesn't matter
+ 418        "'": TokenType.QUOTE,
+ 419        "`": TokenType.IDENTIFIER,
+ 420        '"': TokenType.IDENTIFIER,
+ 421        "#": TokenType.HASH,
+ 422    }
+ 423
+ 424    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
+ 425
+ 426    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 427
+ 428    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 429
+ 430    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
+ 431
+ 432    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
+ 433
+ 434    STRING_ESCAPES = ["'"]
+ 435
+ 436    _STRING_ESCAPES: t.Set[str] = set()
+ 437
+ 438    IDENTIFIER_ESCAPES = ['"']
+ 439
+ 440    _IDENTIFIER_ESCAPES: t.Set[str] = set()
+ 441
+ 442    KEYWORDS = {
+ 443        **{
+ 444            f"{key}{postfix}": TokenType.BLOCK_START
+ 445            for key in ("{{", "{%", "{#")
+ 446            for postfix in ("", "+", "-")
+ 447        },
+ 448        **{
+ 449            f"{prefix}{key}": TokenType.BLOCK_END
+ 450            for key in ("%}", "#}")
+ 451            for prefix in ("", "+", "-")
+ 452        },
+ 453        "+}}": TokenType.BLOCK_END,
+ 454        "-}}": TokenType.BLOCK_END,
+ 455        "/*+": TokenType.HINT,
+ 456        "==": TokenType.EQ,
+ 457        "::": TokenType.DCOLON,
+ 458        "||": TokenType.DPIPE,
+ 459        ">=": TokenType.GTE,
+ 460        "<=": TokenType.LTE,
+ 461        "<>": TokenType.NEQ,
+ 462        "!=": TokenType.NEQ,
+ 463        "<=>": TokenType.NULLSAFE_EQ,
+ 464        "->": TokenType.ARROW,
+ 465        "->>": TokenType.DARROW,
+ 466        "=>": TokenType.FARROW,
+ 467        "#>": TokenType.HASH_ARROW,
+ 468        "#>>": TokenType.DHASH_ARROW,
+ 469        "<->": TokenType.LR_ARROW,
+ 470        "ALL": TokenType.ALL,
+ 471        "ALWAYS": TokenType.ALWAYS,
+ 472        "AND": TokenType.AND,
+ 473        "ANTI": TokenType.ANTI,
+ 474        "ANY": TokenType.ANY,
+ 475        "ASC": TokenType.ASC,
+ 476        "AS": TokenType.ALIAS,
+ 477        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
+ 478        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
+ 479        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
+ 480        "BEGIN": TokenType.BEGIN,
+ 481        "BETWEEN": TokenType.BETWEEN,
+ 482        "BOTH": TokenType.BOTH,
+ 483        "BUCKET": TokenType.BUCKET,
+ 484        "BY DEFAULT": TokenType.BY_DEFAULT,
+ 485        "CACHE": TokenType.CACHE,
+ 486        "UNCACHE": TokenType.UNCACHE,
+ 487        "CASE": TokenType.CASE,
+ 488        "CASCADE": TokenType.CASCADE,
+ 489        "CHARACTER SET": TokenType.CHARACTER_SET,
+ 490        "CHECK": TokenType.CHECK,
+ 491        "CLUSTER BY": TokenType.CLUSTER_BY,
+ 492        "COLLATE": TokenType.COLLATE,
+ 493        "COLUMN": TokenType.COLUMN,
+ 494        "COMMENT": TokenType.SCHEMA_COMMENT,
+ 495        "COMMIT": TokenType.COMMIT,
+ 496        "COMPOUND": TokenType.COMPOUND,
+ 497        "CONSTRAINT": TokenType.CONSTRAINT,
+ 498        "CREATE": TokenType.CREATE,
+ 499        "CROSS": TokenType.CROSS,
+ 500        "CUBE": TokenType.CUBE,
+ 501        "CURRENT_DATE": TokenType.CURRENT_DATE,
+ 502        "CURRENT ROW": TokenType.CURRENT_ROW,
+ 503        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
+ 504        "DEFAULT": TokenType.DEFAULT,
+ 505        "DELETE": TokenType.DELETE,
+ 506        "DESC": TokenType.DESC,
+ 507        "DESCRIBE": TokenType.DESCRIBE,
+ 508        "DISTINCT": TokenType.DISTINCT,
+ 509        "DISTINCT FROM": TokenType.DISTINCT_FROM,
+ 510        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
+ 511        "DIV": TokenType.DIV,
+ 512        "DROP": TokenType.DROP,
+ 513        "ELSE": TokenType.ELSE,
+ 514        "END": TokenType.END,
+ 515        "ESCAPE": TokenType.ESCAPE,
+ 516        "EXCEPT": TokenType.EXCEPT,
+ 517        "EXECUTE": TokenType.EXECUTE,
+ 518        "EXISTS": TokenType.EXISTS,
+ 519        "FALSE": TokenType.FALSE,
+ 520        "FETCH": TokenType.FETCH,
+ 521        "FILTER": TokenType.FILTER,
+ 522        "FIRST": TokenType.FIRST,
+ 523        "FULL": TokenType.FULL,
+ 524        "FUNCTION": TokenType.FUNCTION,
+ 525        "FOLLOWING": TokenType.FOLLOWING,
+ 526        "FOR": TokenType.FOR,
+ 527        "FOREIGN KEY": TokenType.FOREIGN_KEY,
+ 528        "FORMAT": TokenType.FORMAT,
+ 529        "FROM": TokenType.FROM,
+ 530        "GENERATED": TokenType.GENERATED,
+ 531        "GLOB": TokenType.GLOB,
+ 532        "GROUP BY": TokenType.GROUP_BY,
+ 533        "GROUPING SETS": TokenType.GROUPING_SETS,
+ 534        "HAVING": TokenType.HAVING,
+ 535        "IDENTITY": TokenType.IDENTITY,
+ 536        "IF": TokenType.IF,
+ 537        "ILIKE": TokenType.ILIKE,
+ 538        "IGNORE NULLS": TokenType.IGNORE_NULLS,
+ 539        "IN": TokenType.IN,
+ 540        "INDEX": TokenType.INDEX,
+ 541        "INNER": TokenType.INNER,
+ 542        "INSERT": TokenType.INSERT,
+ 543        "INTERVAL": TokenType.INTERVAL,
+ 544        "INTERSECT": TokenType.INTERSECT,
+ 545        "INTO": TokenType.INTO,
+ 546        "IS": TokenType.IS,
+ 547        "ISNULL": TokenType.ISNULL,
+ 548        "JOIN": TokenType.JOIN,
+ 549        "LATERAL": TokenType.LATERAL,
+ 550        "LAZY": TokenType.LAZY,
+ 551        "LEADING": TokenType.LEADING,
+ 552        "LEFT": TokenType.LEFT,
+ 553        "LIKE": TokenType.LIKE,
+ 554        "LIMIT": TokenType.LIMIT,
+ 555        "LOAD DATA": TokenType.LOAD_DATA,
+ 556        "LOCAL": TokenType.LOCAL,
+ 557        "MATERIALIZED": TokenType.MATERIALIZED,
+ 558        "MERGE": TokenType.MERGE,
+ 559        "NATURAL": TokenType.NATURAL,
+ 560        "NEXT": TokenType.NEXT,
+ 561        "NO ACTION": TokenType.NO_ACTION,
+ 562        "NOT": TokenType.NOT,
+ 563        "NOTNULL": TokenType.NOTNULL,
+ 564        "NULL": TokenType.NULL,
+ 565        "NULLS FIRST": TokenType.NULLS_FIRST,
+ 566        "NULLS LAST": TokenType.NULLS_LAST,
+ 567        "OBJECT": TokenType.OBJECT,
+ 568        "OFFSET": TokenType.OFFSET,
+ 569        "ON": TokenType.ON,
+ 570        "ONLY": TokenType.ONLY,
+ 571        "OPTIONS": TokenType.OPTIONS,
+ 572        "OR": TokenType.OR,
+ 573        "ORDER BY": TokenType.ORDER_BY,
+ 574        "ORDINALITY": TokenType.ORDINALITY,
+ 575        "OUTER": TokenType.OUTER,
+ 576        "OUT OF": TokenType.OUT_OF,
+ 577        "OVER": TokenType.OVER,
+ 578        "OVERWRITE": TokenType.OVERWRITE,
+ 579        "PARTITION": TokenType.PARTITION,
+ 580        "PARTITION BY": TokenType.PARTITION_BY,
+ 581        "PARTITIONED BY": TokenType.PARTITION_BY,
+ 582        "PARTITIONED_BY": TokenType.PARTITION_BY,
+ 583        "PERCENT": TokenType.PERCENT,
+ 584        "PIVOT": TokenType.PIVOT,
+ 585        "PRECEDING": TokenType.PRECEDING,
+ 586        "PRIMARY KEY": TokenType.PRIMARY_KEY,
+ 587        "PROCEDURE": TokenType.PROCEDURE,
+ 588        "QUALIFY": TokenType.QUALIFY,
+ 589        "RANGE": TokenType.RANGE,
+ 590        "RECURSIVE": TokenType.RECURSIVE,
+ 591        "REGEXP": TokenType.RLIKE,
+ 592        "REPLACE": TokenType.REPLACE,
+ 593        "RESPECT NULLS": TokenType.RESPECT_NULLS,
+ 594        "REFERENCES": TokenType.REFERENCES,
+ 595        "RIGHT": TokenType.RIGHT,
+ 596        "RLIKE": TokenType.RLIKE,
+ 597        "ROLLBACK": TokenType.ROLLBACK,
+ 598        "ROLLUP": TokenType.ROLLUP,
+ 599        "ROW": TokenType.ROW,
+ 600        "ROWS": TokenType.ROWS,
+ 601        "SCHEMA": TokenType.SCHEMA,
+ 602        "SEED": TokenType.SEED,
+ 603        "SELECT": TokenType.SELECT,
+ 604        "SEMI": TokenType.SEMI,
+ 605        "SET": TokenType.SET,
+ 606        "SHOW": TokenType.SHOW,
+ 607        "SIMILAR TO": TokenType.SIMILAR_TO,
+ 608        "SOME": TokenType.SOME,
+ 609        "SORTKEY": TokenType.SORTKEY,
+ 610        "SORT BY": TokenType.SORT_BY,
+ 611        "TABLE": TokenType.TABLE,
+ 612        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
+ 613        "TEMP": TokenType.TEMPORARY,
+ 614        "TEMPORARY": TokenType.TEMPORARY,
+ 615        "THEN": TokenType.THEN,
+ 616        "TRUE": TokenType.TRUE,
+ 617        "TRAILING": TokenType.TRAILING,
+ 618        "UNBOUNDED": TokenType.UNBOUNDED,
+ 619        "UNION": TokenType.UNION,
+ 620        "UNLOGGED": TokenType.UNLOGGED,
+ 621        "UNNEST": TokenType.UNNEST,
+ 622        "UNPIVOT": TokenType.UNPIVOT,
+ 623        "UPDATE": TokenType.UPDATE,
+ 624        "USE": TokenType.USE,
+ 625        "USING": TokenType.USING,
+ 626        "VALUES": TokenType.VALUES,
+ 627        "VIEW": TokenType.VIEW,
+ 628        "VOLATILE": TokenType.VOLATILE,
+ 629        "WHEN": TokenType.WHEN,
+ 630        "WHERE": TokenType.WHERE,
+ 631        "WINDOW": TokenType.WINDOW,
+ 632        "WITH": TokenType.WITH,
+ 633        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
+ 634        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
+ 635        "WITHIN GROUP": TokenType.WITHIN_GROUP,
+ 636        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
+ 637        "APPLY": TokenType.APPLY,
+ 638        "ARRAY": TokenType.ARRAY,
+ 639        "BOOL": TokenType.BOOLEAN,
+ 640        "BOOLEAN": TokenType.BOOLEAN,
+ 641        "BYTE": TokenType.TINYINT,
+ 642        "TINYINT": TokenType.TINYINT,
+ 643        "SHORT": TokenType.SMALLINT,
+ 644        "SMALLINT": TokenType.SMALLINT,
+ 645        "INT2": TokenType.SMALLINT,
+ 646        "INTEGER": TokenType.INT,
+ 647        "INT": TokenType.INT,
+ 648        "INT4": TokenType.INT,
+ 649        "LONG": TokenType.BIGINT,
+ 650        "BIGINT": TokenType.BIGINT,
+ 651        "INT8": TokenType.BIGINT,
+ 652        "DECIMAL": TokenType.DECIMAL,
+ 653        "MAP": TokenType.MAP,
+ 654        "NULLABLE": TokenType.NULLABLE,
+ 655        "NUMBER": TokenType.DECIMAL,
+ 656        "NUMERIC": TokenType.DECIMAL,
+ 657        "FIXED": TokenType.DECIMAL,
+ 658        "REAL": TokenType.FLOAT,
+ 659        "FLOAT": TokenType.FLOAT,
+ 660        "FLOAT4": TokenType.FLOAT,
+ 661        "FLOAT8": TokenType.DOUBLE,
+ 662        "DOUBLE": TokenType.DOUBLE,
+ 663        "DOUBLE PRECISION": TokenType.DOUBLE,
+ 664        "JSON": TokenType.JSON,
+ 665        "CHAR": TokenType.CHAR,
+ 666        "NCHAR": TokenType.NCHAR,
+ 667        "VARCHAR": TokenType.VARCHAR,
+ 668        "VARCHAR2": TokenType.VARCHAR,
+ 669        "NVARCHAR": TokenType.NVARCHAR,
+ 670        "NVARCHAR2": TokenType.NVARCHAR,
+ 671        "STR": TokenType.TEXT,
+ 672        "STRING": TokenType.TEXT,
+ 673        "TEXT": TokenType.TEXT,
+ 674        "CLOB": TokenType.TEXT,
+ 675        "LONGVARCHAR": TokenType.TEXT,
+ 676        "BINARY": TokenType.BINARY,
+ 677        "BLOB": TokenType.VARBINARY,
+ 678        "BYTEA": TokenType.VARBINARY,
+ 679        "VARBINARY": TokenType.VARBINARY,
+ 680        "TIME": TokenType.TIME,
+ 681        "TIMESTAMP": TokenType.TIMESTAMP,
+ 682        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
+ 683        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
+ 684        "DATE": TokenType.DATE,
+ 685        "DATETIME": TokenType.DATETIME,
+ 686        "UNIQUE": TokenType.UNIQUE,
+ 687        "STRUCT": TokenType.STRUCT,
+ 688        "VARIANT": TokenType.VARIANT,
+ 689        "ALTER": TokenType.ALTER,
+ 690        "ALTER AGGREGATE": TokenType.COMMAND,
+ 691        "ALTER DEFAULT": TokenType.COMMAND,
+ 692        "ALTER DOMAIN": TokenType.COMMAND,
+ 693        "ALTER ROLE": TokenType.COMMAND,
+ 694        "ALTER RULE": TokenType.COMMAND,
+ 695        "ALTER SEQUENCE": TokenType.COMMAND,
+ 696        "ALTER TYPE": TokenType.COMMAND,
+ 697        "ALTER USER": TokenType.COMMAND,
+ 698        "ALTER VIEW": TokenType.COMMAND,
+ 699        "ANALYZE": TokenType.COMMAND,
+ 700        "CALL": TokenType.COMMAND,
+ 701        "COPY": TokenType.COMMAND,
+ 702        "EXPLAIN": TokenType.COMMAND,
+ 703        "OPTIMIZE": TokenType.COMMAND,
+ 704        "PREPARE": TokenType.COMMAND,
+ 705        "TRUNCATE": TokenType.COMMAND,
+ 706        "VACUUM": TokenType.COMMAND,
+ 707    }
+ 708
+ 709    WHITE_SPACE = {
+ 710        " ": TokenType.SPACE,
+ 711        "\t": TokenType.SPACE,
+ 712        "\n": TokenType.BREAK,
+ 713        "\r": TokenType.BREAK,
+ 714        "\r\n": TokenType.BREAK,
+ 715    }
+ 716
+ 717    COMMANDS = {
+ 718        TokenType.COMMAND,
+ 719        TokenType.EXECUTE,
+ 720        TokenType.FETCH,
+ 721        TokenType.SET,
+ 722        TokenType.SHOW,
+ 723    }
+ 724
+ 725    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
+ 726
+ 727    # handle numeric literals like in hive (3L = BIGINT)
+ 728    NUMERIC_LITERALS: t.Dict[str, str] = {}
+ 729    ENCODE: t.Optional[str] = None
+ 730
+ 731    COMMENTS = ["--", ("/*", "*/")]
+ 732    KEYWORD_TRIE = None  # autofilled
+ 733
+ 734    IDENTIFIER_CAN_START_WITH_DIGIT = False
+ 735
+ 736    __slots__ = (
+ 737        "sql",
+ 738        "size",
+ 739        "tokens",
+ 740        "_start",
+ 741        "_current",
+ 742        "_line",
+ 743        "_col",
+ 744        "_comments",
+ 745        "_char",
+ 746        "_end",
+ 747        "_peek",
+ 748        "_prev_token_line",
+ 749        "_prev_token_comments",
+ 750        "_prev_token_type",
+ 751        "_replace_backslash",
+ 752    )
+ 753
+ 754    def __init__(self) -> None:
+ 755        self._replace_backslash = "\\" in self._STRING_ESCAPES
+ 756        self.reset()
+ 757
+ 758    def reset(self) -> None:
+ 759        self.sql = ""
+ 760        self.size = 0
+ 761        self.tokens: t.List[Token] = []
+ 762        self._start = 0
+ 763        self._current = 0
+ 764        self._line = 1
+ 765        self._col = 1
+ 766        self._comments: t.List[str] = []
+ 767
+ 768        self._char = None
+ 769        self._end = None
+ 770        self._peek = None
+ 771        self._prev_token_line = -1
+ 772        self._prev_token_comments: t.List[str] = []
+ 773        self._prev_token_type = None
+ 774
+ 775    def tokenize(self, sql: str) -> t.List[Token]:
+ 776        """Returns a list of tokens corresponding to the SQL string `sql`."""
+ 777        self.reset()
+ 778        self.sql = sql
+ 779        self.size = len(sql)
+ 780        self._scan()
+ 781        return self.tokens
+ 782
+ 783    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
+ 784        while self.size and not self._end:
+ 785            self._start = self._current
+ 786            self._advance()
+ 787
+ 788            if not self._char:
+ 789                break
+ 790
+ 791            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
+ 792            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore
+ 793
+ 794            if white_space:
+ 795                if white_space == TokenType.BREAK:
+ 796                    self._col = 1
+ 797                    self._line += 1
+ 798            elif self._char.isdigit():  # type:ignore
+ 799                self._scan_number()
+ 800            elif identifier_end:
+ 801                self._scan_identifier(identifier_end)
+ 802            else:
+ 803                self._scan_keywords()
+ 804
+ 805            if until and until():
+ 806                break
+ 807
+ 808    def _chars(self, size: int) -> str:
+ 809        if size == 1:
+ 810            return self._char  # type: ignore
+ 811        start = self._current - 1
+ 812        end = start + size
+ 813        if end <= self.size:
+ 814            return self.sql[start:end]
+ 815        return ""
+ 816
+ 817    def _advance(self, i: int = 1) -> None:
+ 818        self._col += i
+ 819        self._current += i
+ 820        self._end = self._current >= self.size  # type: ignore
+ 821        self._char = self.sql[self._current - 1]  # type: ignore
+ 822        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore
+ 823
+ 824    @property
+ 825    def _text(self) -> str:
+ 826        return self.sql[self._start : self._current]
+ 827
+ 828    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
+ 829        self._prev_token_line = self._line
+ 830        self._prev_token_comments = self._comments
+ 831        self._prev_token_type = token_type  # type: ignore
+ 832        self.tokens.append(
+ 833            Token(
+ 834                token_type,
+ 835                self._text if text is None else text,
+ 836                self._line,
+ 837                self._col,
+ 838                self._comments,
+ 839            )
+ 840        )
+ 841        self._comments = []
+ 842
+ 843        # If we have either a semicolon or a begin token before the command's token, we'll parse
+ 844        # whatever follows the command's token as a string
+ 845        if token_type in self.COMMANDS and (
+ 846            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
+ 847        ):
+ 848            start = self._current
+ 849            tokens = len(self.tokens)
+ 850            self._scan(lambda: self._peek == ";")
+ 851            self.tokens = self.tokens[:tokens]
+ 852            text = self.sql[start : self._current].strip()
+ 853            if text:
+ 854                self._add(TokenType.STRING, text)
+ 855
+ 856    def _scan_keywords(self) -> None:
+ 857        size = 0
+ 858        word = None
+ 859        chars = self._text
+ 860        char = chars
+ 861        prev_space = False
+ 862        skip = False
+ 863        trie = self.KEYWORD_TRIE
+ 864
+ 865        while chars:
+ 866            if skip:
+ 867                result = 1
+ 868            else:
+ 869                result, trie = in_trie(trie, char.upper())  # type: ignore
+ 870
+ 871            if result == 0:
+ 872                break
+ 873            if result == 2:
+ 874                word = chars
+ 875            size += 1
+ 876            end = self._current - 1 + size
+ 877
+ 878            if end < self.size:
+ 879                char = self.sql[end]
+ 880                is_space = char in self.WHITE_SPACE
+ 881
+ 882                if not is_space or not prev_space:
+ 883                    if is_space:
+ 884                        char = " "
+ 885                    chars += char
+ 886                    prev_space = is_space
+ 887                    skip = False
+ 888                else:
+ 889                    skip = True
+ 890            else:
+ 891                chars = None  # type: ignore
+ 892
+ 893        if not word:
+ 894            if self._char in self.SINGLE_TOKENS:
+ 895                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
+ 896                return
+ 897            self._scan_var()
+ 898            return
+ 899
+ 900        if self._scan_string(word):
+ 901            return
+ 902        if self._scan_formatted_string(word):
+ 903            return
+ 904        if self._scan_comment(word):
+ 905            return
+ 906
+ 907        self._advance(size - 1)
+ 908        self._add(self.KEYWORDS[word.upper()])
+ 909
+ 910    def _scan_comment(self, comment_start: str) -> bool:
+ 911        if comment_start not in self._COMMENTS:  # type: ignore
+ 912            return False
+ 913
+ 914        comment_start_line = self._line
+ 915        comment_start_size = len(comment_start)
+ 916        comment_end = self._COMMENTS[comment_start]  # type: ignore
+ 917
+ 918        if comment_end:
+ 919            comment_end_size = len(comment_end)
+ 920
+ 921            while not self._end and self._chars(comment_end_size) != comment_end:
+ 922                self._advance()
+ 923
+ 924            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
+ 925            self._advance(comment_end_size - 1)
+ 926        else:
+ 927            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
+ 928                self._advance()
+ 929            self._comments.append(self._text[comment_start_size:])  # type: ignore
+ 930
+ 931        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
+ 932        # Multiple consecutive comments are preserved by appending them to the current comments list.
+ 933        if comment_start_line == self._prev_token_line:
+ 934            self.tokens[-1].comments.extend(self._comments)
+ 935            self._comments = []
+ 936
+ 937        return True
+ 938
+ 939    def _scan_number(self) -> None:
+ 940        if self._char == "0":
+ 941            peek = self._peek.upper()  # type: ignore
+ 942            if peek == "B":
+ 943                return self._scan_bits()
+ 944            elif peek == "X":
+ 945                return self._scan_hex()
+ 946
+ 947        decimal = False
+ 948        scientific = 0
+ 949
+ 950        while True:
+ 951            if self._peek.isdigit():  # type: ignore
+ 952                self._advance()
+ 953            elif self._peek == "." and not decimal:
+ 954                decimal = True
+ 955                self._advance()
+ 956            elif self._peek in ("-", "+") and scientific == 1:
+ 957                scientific += 1
+ 958                self._advance()
+ 959            elif self._peek.upper() == "E" and not scientific:  # type: ignore
+ 960                scientific += 1
+ 961                self._advance()
+ 962            elif self._peek.isidentifier():  # type: ignore
+ 963                number_text = self._text
+ 964                literal = []
+ 965
+ 966                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
+ 967                    literal.append(self._peek.upper())  # type: ignore
+ 968                    self._advance()
+ 969
+ 970                literal = "".join(literal)  # type: ignore
+ 971                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore
+ 972
+ 973                if token_type:
+ 974                    self._add(TokenType.NUMBER, number_text)
+ 975                    self._add(TokenType.DCOLON, "::")
+ 976                    return self._add(token_type, literal)  # type: ignore
+ 977                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
+ 978                    return self._add(TokenType.VAR)
+ 979
+ 980                self._add(TokenType.NUMBER, number_text)
+ 981                return self._advance(-len(literal))
+ 982            else:
+ 983                return self._add(TokenType.NUMBER)
+ 984
+ 985    def _scan_bits(self) -> None:
+ 986        self._advance()
+ 987        value = self._extract_value()
+ 988        try:
+ 989            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
+ 990        except ValueError:
+ 991            self._add(TokenType.IDENTIFIER)
+ 992
+ 993    def _scan_hex(self) -> None:
+ 994        self._advance()
+ 995        value = self._extract_value()
+ 996        try:
+ 997            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
+ 998        except ValueError:
+ 999            self._add(TokenType.IDENTIFIER)
+1000
+1001    def _extract_value(self) -> str:
+1002        while True:
+1003            char = self._peek.strip()  # type: ignore
+1004            if char and char not in self.SINGLE_TOKENS:
+1005                self._advance()
+1006            else:
+1007                break
+1008
+1009        return self._text
+1010
+1011    def _scan_string(self, quote: str) -> bool:
+1012        quote_end = self._QUOTES.get(quote)  # type: ignore
+1013        if quote_end is None:
+1014            return False
+1015
+1016        self._advance(len(quote))
+1017        text = self._extract_string(quote_end)
+1018        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
+1019        text = text.replace("\\\\", "\\") if self._replace_backslash else text
+1020        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
+1021        return True
+1022
+1023    # X'1234, b'0110', E'\\\\\' etc.
+1024    def _scan_formatted_string(self, string_start: str) -> bool:
+1025        if string_start in self._HEX_STRINGS:  # type: ignore
+1026            delimiters = self._HEX_STRINGS  # type: ignore
+1027            token_type = TokenType.HEX_STRING
+1028            base = 16
+1029        elif string_start in self._BIT_STRINGS:  # type: ignore
+1030            delimiters = self._BIT_STRINGS  # type: ignore
+1031            token_type = TokenType.BIT_STRING
+1032            base = 2
+1033        elif string_start in self._BYTE_STRINGS:  # type: ignore
+1034            delimiters = self._BYTE_STRINGS  # type: ignore
+1035            token_type = TokenType.BYTE_STRING
+1036            base = None
+1037        else:
+1038            return False
+1039
+1040        self._advance(len(string_start))
+1041        string_end = delimiters.get(string_start)
+1042        text = self._extract_string(string_end)
+1043
+1044        if base is None:
+1045            self._add(token_type, text)
+1046        else:
+1047            try:
+1048                self._add(token_type, f"{int(text, base)}")
+1049            except:
+1050                raise RuntimeError(
+1051                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
+1052                )
+1053
+1054        return True
+1055
+1056    def _scan_identifier(self, identifier_end: str) -> None:
+1057        text = ""
+1058        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES
+1059
+1060        while True:
+1061            if self._end:
+1062                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")
+1063
+1064            self._advance()
+1065            if self._char == identifier_end:
+1066                if identifier_end_is_escape and self._peek == identifier_end:
+1067                    text += identifier_end  # type: ignore
+1068                    self._advance()
+1069                    continue
+1070
+1071                break
+1072
+1073            text += self._char  # type: ignore
+1074
+1075        self._add(TokenType.IDENTIFIER, text)
+1076
+1077    def _scan_var(self) -> None:
+1078        while True:
+1079            char = self._peek.strip()  # type: ignore
+1080            if char and char not in self.SINGLE_TOKENS:
+1081                self._advance()
+1082            else:
+1083                break
+1084        self._add(
+1085            TokenType.VAR
+1086            if self._prev_token_type == TokenType.PARAMETER
+1087            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
+1088        )
+1089
+1090    def _extract_string(self, delimiter: str) -> str:
+1091        text = ""
+1092        delim_size = len(delimiter)
+1093
+1094        while True:
+1095            if (
+1096                self._char in self._STRING_ESCAPES
+1097                and self._peek
+1098                and (self._peek == delimiter or self._peek in self._STRING_ESCAPES)
+1099            ):
+1100                text += self._peek
+1101                self._advance(2)
+1102            else:
+1103                if self._chars(delim_size) == delimiter:
+1104                    if delim_size > 1:
+1105                        self._advance(delim_size - 1)
+1106                    break
+1107
+1108                if self._end:
+1109                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
+1110                text += self._char  # type: ignore
+1111                self._advance()
+1112
+1113        return text
+
+ + + + +
+ +
+ + Tokenizer() + + + +
+ +
754    def __init__(self) -> None:
+755        self._replace_backslash = "\\" in self._STRING_ESCAPES
+756        self.reset()
+
+ + + + +
+
+ +
+ + def + reset(self) -> None: + + + +
+ +
758    def reset(self) -> None:
+759        self.sql = ""
+760        self.size = 0
+761        self.tokens: t.List[Token] = []
+762        self._start = 0
+763        self._current = 0
+764        self._line = 1
+765        self._col = 1
+766        self._comments: t.List[str] = []
+767
+768        self._char = None
+769        self._end = None
+770        self._peek = None
+771        self._prev_token_line = -1
+772        self._prev_token_comments: t.List[str] = []
+773        self._prev_token_type = None
+
+ + + + +
+
+ +
+ + def + tokenize(self, sql: str) -> List[sqlglot.tokens.Token]: + + + +
+ +
775    def tokenize(self, sql: str) -> t.List[Token]:
+776        """Returns a list of tokens corresponding to the SQL string `sql`."""
+777        self.reset()
+778        self.sql = sql
+779        self.size = len(sql)
+780        self._scan()
+781        return self.tokens
+
+ + +

Returns a list of tokens corresponding to the SQL string sql.

+
+ + +
+
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/transforms.html b/docs/sqlglot/transforms.html new file mode 100644 index 0000000..77a2307 --- /dev/null +++ b/docs/sqlglot/transforms.html @@ -0,0 +1,667 @@ + + + + + + + sqlglot.transforms API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.transforms

+ + + + + + +
  1from __future__ import annotations
+  2
+  3import typing as t
+  4
+  5from sqlglot.helper import find_new_name
+  6
+  7if t.TYPE_CHECKING:
+  8    from sqlglot.generator import Generator
+  9
+ 10from sqlglot import expressions as exp
+ 11
+ 12
+ 13def unalias_group(expression: exp.Expression) -> exp.Expression:
+ 14    """
+ 15    Replace references to select aliases in GROUP BY clauses.
+ 16
+ 17    Example:
+ 18        >>> import sqlglot
+ 19        >>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()
+ 20        'SELECT a AS b FROM x GROUP BY 1'
+ 21
+ 22    Args:
+ 23        expression: the expression that will be transformed.
+ 24
+ 25    Returns:
+ 26        The transformed expression.
+ 27    """
+ 28    if isinstance(expression, exp.Group) and isinstance(expression.parent, exp.Select):
+ 29        aliased_selects = {
+ 30            e.alias: i
+ 31            for i, e in enumerate(expression.parent.expressions, start=1)
+ 32            if isinstance(e, exp.Alias)
+ 33        }
+ 34
+ 35        for group_by in expression.expressions:
+ 36            if (
+ 37                isinstance(group_by, exp.Column)
+ 38                and not group_by.table
+ 39                and group_by.name in aliased_selects
+ 40            ):
+ 41                group_by.replace(exp.Literal.number(aliased_selects.get(group_by.name)))
+ 42
+ 43    return expression
+ 44
+ 45
+ 46def eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:
+ 47    """
+ 48    Convert SELECT DISTINCT ON statements to a subquery with a window function.
+ 49
+ 50    This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.
+ 51
+ 52    Args:
+ 53        expression: the expression that will be transformed.
+ 54
+ 55    Returns:
+ 56        The transformed expression.
+ 57    """
+ 58    if (
+ 59        isinstance(expression, exp.Select)
+ 60        and expression.args.get("distinct")
+ 61        and expression.args["distinct"].args.get("on")
+ 62        and isinstance(expression.args["distinct"].args["on"], exp.Tuple)
+ 63    ):
+ 64        distinct_cols = expression.args["distinct"].args["on"].expressions
+ 65        expression.args["distinct"].pop()
+ 66        outer_selects = expression.selects
+ 67        row_number = find_new_name(expression.named_selects, "_row_number")
+ 68        window = exp.Window(
+ 69            this=exp.RowNumber(),
+ 70            partition_by=distinct_cols,
+ 71        )
+ 72        order = expression.args.get("order")
+ 73        if order:
+ 74            window.set("order", order.copy())
+ 75            order.pop()
+ 76        window = exp.alias_(window, row_number)
+ 77        expression.select(window, copy=False)
+ 78        return exp.select(*outer_selects).from_(expression.subquery()).where(f'"{row_number}" = 1')
+ 79    return expression
+ 80
+ 81
+ 82def remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:
+ 83    """
+ 84    Some dialects only allow the precision for parameterized types to be defined in the DDL and not in other expressions.
+ 85    This transforms removes the precision from parameterized types in expressions.
+ 86    """
+ 87    return expression.transform(
+ 88        lambda node: exp.DataType(
+ 89            **{
+ 90                **node.args,
+ 91                "expressions": [
+ 92                    node_expression
+ 93                    for node_expression in node.expressions
+ 94                    if isinstance(node_expression, exp.DataType)
+ 95                ],
+ 96            }
+ 97        )
+ 98        if isinstance(node, exp.DataType)
+ 99        else node,
+100    )
+101
+102
+103def preprocess(
+104    transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],
+105    to_sql: t.Callable[[Generator, exp.Expression], str],
+106) -> t.Callable[[Generator, exp.Expression], str]:
+107    """
+108    Creates a new transform by chaining a sequence of transformations and converts the resulting
+109    expression to SQL, using an appropriate `Generator.TRANSFORMS` function.
+110
+111    Args:
+112        transforms: sequence of transform functions. These will be called in order.
+113        to_sql: final transform that converts the resulting expression to a SQL string.
+114
+115    Returns:
+116        Function that can be used as a generator transform.
+117    """
+118
+119    def _to_sql(self, expression):
+120        expression = transforms[0](expression.copy())
+121        for t in transforms[1:]:
+122            expression = t(expression)
+123        return to_sql(self, expression)
+124
+125    return _to_sql
+126
+127
+128def delegate(attr: str) -> t.Callable:
+129    """
+130    Create a new method that delegates to `attr`. This is useful for creating `Generator.TRANSFORMS`
+131    functions that delegate to existing generator methods.
+132    """
+133
+134    def _transform(self, *args, **kwargs):
+135        return getattr(self, attr)(*args, **kwargs)
+136
+137    return _transform
+138
+139
+140UNALIAS_GROUP = {exp.Group: preprocess([unalias_group], delegate("group_sql"))}
+141ELIMINATE_DISTINCT_ON = {exp.Select: preprocess([eliminate_distinct_on], delegate("select_sql"))}
+142REMOVE_PRECISION_PARAMETERIZED_TYPES = {
+143    exp.Cast: preprocess([remove_precision_parameterized_types], delegate("cast_sql"))
+144}
+
+ + +
+
+ +
+ + def + unalias_group( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
14def unalias_group(expression: exp.Expression) -> exp.Expression:
+15    """
+16    Replace references to select aliases in GROUP BY clauses.
+17
+18    Example:
+19        >>> import sqlglot
+20        >>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()
+21        'SELECT a AS b FROM x GROUP BY 1'
+22
+23    Args:
+24        expression: the expression that will be transformed.
+25
+26    Returns:
+27        The transformed expression.
+28    """
+29    if isinstance(expression, exp.Group) and isinstance(expression.parent, exp.Select):
+30        aliased_selects = {
+31            e.alias: i
+32            for i, e in enumerate(expression.parent.expressions, start=1)
+33            if isinstance(e, exp.Alias)
+34        }
+35
+36        for group_by in expression.expressions:
+37            if (
+38                isinstance(group_by, exp.Column)
+39                and not group_by.table
+40                and group_by.name in aliased_selects
+41            ):
+42                group_by.replace(exp.Literal.number(aliased_selects.get(group_by.name)))
+43
+44    return expression
+
+ + +

Replace references to select aliases in GROUP BY clauses.

+ +
Example:
+ +
+
+
>>> import sqlglot
+>>> sqlglot.parse_one("SELECT a AS b FROM x GROUP BY b").transform(unalias_group).sql()
+'SELECT a AS b FROM x GROUP BY 1'
+
+
+
+ +
Arguments:
+ +
    +
  • expression: the expression that will be transformed.
  • +
+ +
Returns:
+ +
+

The transformed expression.

+
+
+ + +
+
+ +
+ + def + eliminate_distinct_on( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
47def eliminate_distinct_on(expression: exp.Expression) -> exp.Expression:
+48    """
+49    Convert SELECT DISTINCT ON statements to a subquery with a window function.
+50
+51    This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.
+52
+53    Args:
+54        expression: the expression that will be transformed.
+55
+56    Returns:
+57        The transformed expression.
+58    """
+59    if (
+60        isinstance(expression, exp.Select)
+61        and expression.args.get("distinct")
+62        and expression.args["distinct"].args.get("on")
+63        and isinstance(expression.args["distinct"].args["on"], exp.Tuple)
+64    ):
+65        distinct_cols = expression.args["distinct"].args["on"].expressions
+66        expression.args["distinct"].pop()
+67        outer_selects = expression.selects
+68        row_number = find_new_name(expression.named_selects, "_row_number")
+69        window = exp.Window(
+70            this=exp.RowNumber(),
+71            partition_by=distinct_cols,
+72        )
+73        order = expression.args.get("order")
+74        if order:
+75            window.set("order", order.copy())
+76            order.pop()
+77        window = exp.alias_(window, row_number)
+78        expression.select(window, copy=False)
+79        return exp.select(*outer_selects).from_(expression.subquery()).where(f'"{row_number}" = 1')
+80    return expression
+
+ + +

Convert SELECT DISTINCT ON statements to a subquery with a window function.

+ +

This is useful for dialects that don't support SELECT DISTINCT ON but support window functions.

+ +
Arguments:
+ +
    +
  • expression: the expression that will be transformed.
  • +
+ +
Returns:
+ +
+

The transformed expression.

+
+
+ + +
+
+ +
+ + def + remove_precision_parameterized_types( expression: sqlglot.expressions.Expression) -> sqlglot.expressions.Expression: + + + +
+ +
 83def remove_precision_parameterized_types(expression: exp.Expression) -> exp.Expression:
+ 84    """
+ 85    Some dialects only allow the precision for parameterized types to be defined in the DDL and not in other expressions.
+ 86    This transforms removes the precision from parameterized types in expressions.
+ 87    """
+ 88    return expression.transform(
+ 89        lambda node: exp.DataType(
+ 90            **{
+ 91                **node.args,
+ 92                "expressions": [
+ 93                    node_expression
+ 94                    for node_expression in node.expressions
+ 95                    if isinstance(node_expression, exp.DataType)
+ 96                ],
+ 97            }
+ 98        )
+ 99        if isinstance(node, exp.DataType)
+100        else node,
+101    )
+
+ + +

Some dialects only allow the precision for parameterized types to be defined in the DDL and not in other expressions. +This transforms removes the precision from parameterized types in expressions.

+
+ + +
+
+ +
+ + def + preprocess( transforms: List[Callable[[sqlglot.expressions.Expression], sqlglot.expressions.Expression]], to_sql: Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]) -> Callable[[sqlglot.generator.Generator, sqlglot.expressions.Expression], str]: + + + +
+ +
104def preprocess(
+105    transforms: t.List[t.Callable[[exp.Expression], exp.Expression]],
+106    to_sql: t.Callable[[Generator, exp.Expression], str],
+107) -> t.Callable[[Generator, exp.Expression], str]:
+108    """
+109    Creates a new transform by chaining a sequence of transformations and converts the resulting
+110    expression to SQL, using an appropriate `Generator.TRANSFORMS` function.
+111
+112    Args:
+113        transforms: sequence of transform functions. These will be called in order.
+114        to_sql: final transform that converts the resulting expression to a SQL string.
+115
+116    Returns:
+117        Function that can be used as a generator transform.
+118    """
+119
+120    def _to_sql(self, expression):
+121        expression = transforms[0](expression.copy())
+122        for t in transforms[1:]:
+123            expression = t(expression)
+124        return to_sql(self, expression)
+125
+126    return _to_sql
+
+ + +

Creates a new transform by chaining a sequence of transformations and converts the resulting +expression to SQL, using an appropriate Generator.TRANSFORMS function.

+ +
Arguments:
+ +
    +
  • transforms: sequence of transform functions. These will be called in order.
  • +
  • to_sql: final transform that converts the resulting expression to a SQL string.
  • +
+ +
Returns:
+ +
+

Function that can be used as a generator transform.

+
+
+ + +
+
+ +
+ + def + delegate(attr: str) -> Callable: + + + +
+ +
129def delegate(attr: str) -> t.Callable:
+130    """
+131    Create a new method that delegates to `attr`. This is useful for creating `Generator.TRANSFORMS`
+132    functions that delegate to existing generator methods.
+133    """
+134
+135    def _transform(self, *args, **kwargs):
+136        return getattr(self, attr)(*args, **kwargs)
+137
+138    return _transform
+
+ + +

Create a new method that delegates to attr. This is useful for creating Generator.TRANSFORMS +functions that delegate to existing generator methods.

+
+ + +
+
+ + \ No newline at end of file diff --git a/docs/sqlglot/trie.html b/docs/sqlglot/trie.html new file mode 100644 index 0000000..29dfb54 --- /dev/null +++ b/docs/sqlglot/trie.html @@ -0,0 +1,479 @@ + + + + + + + sqlglot.trie API documentation + + + + + + + + + +
+
+ Edit on GitHub +

+sqlglot.trie

+ + + + + + +
 1import typing as t
+ 2
+ 3key = t.Sequence[t.Hashable]
+ 4
+ 5
+ 6def new_trie(keywords: t.Iterable[key]) -> t.Dict:
+ 7    """
+ 8    Creates a new trie out of a collection of keywords.
+ 9
+10    The trie is represented as a sequence of nested dictionaries keyed by either single character
+11    strings, or by 0, which is used to designate that a keyword is in the trie.
+12
+13    Example:
+14        >>> new_trie(["bla", "foo", "blab"])
+15        {'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}
+16
+17    Args:
+18        keywords: the keywords to create the trie from.
+19
+20    Returns:
+21        The trie corresponding to `keywords`.
+22    """
+23    trie: t.Dict = {}
+24
+25    for key in keywords:
+26        current = trie
+27
+28        for char in key:
+29            current = current.setdefault(char, {})
+30        current[0] = True
+31
+32    return trie
+33
+34
+35def in_trie(trie: t.Dict, key: key) -> t.Tuple[int, t.Dict]:
+36    """
+37    Checks whether a key is in a trie.
+38
+39    Examples:
+40        >>> in_trie(new_trie(["cat"]), "bob")
+41        (0, {'c': {'a': {'t': {0: True}}}})
+42
+43        >>> in_trie(new_trie(["cat"]), "ca")
+44        (1, {'t': {0: True}})
+45
+46        >>> in_trie(new_trie(["cat"]), "cat")
+47        (2, {0: True})
+48
+49    Args:
+50        trie: the trie to be searched.
+51        key: the target key.
+52
+53    Returns:
+54        A pair `(value, subtrie)`, where `subtrie` is the sub-trie we get at the point where the search stops, and `value`
+55        is either 0 (search was unsuccessful), 1 (`value` is a prefix of a keyword in `trie`) or 2 (`key is in `trie`).
+56    """
+57    if not key:
+58        return (0, trie)
+59
+60    current = trie
+61
+62    for char in key:
+63        if char not in current:
+64            return (0, current)
+65        current = current[char]
+66
+67    if 0 in current:
+68        return (2, current)
+69    return (1, current)
+
+ + +
+
+ +
+ + def + new_trie(keywords: Iterable[Sequence[Hashable]]) -> Dict: + + + +
+ +
 7def new_trie(keywords: t.Iterable[key]) -> t.Dict:
+ 8    """
+ 9    Creates a new trie out of a collection of keywords.
+10
+11    The trie is represented as a sequence of nested dictionaries keyed by either single character
+12    strings, or by 0, which is used to designate that a keyword is in the trie.
+13
+14    Example:
+15        >>> new_trie(["bla", "foo", "blab"])
+16        {'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}
+17
+18    Args:
+19        keywords: the keywords to create the trie from.
+20
+21    Returns:
+22        The trie corresponding to `keywords`.
+23    """
+24    trie: t.Dict = {}
+25
+26    for key in keywords:
+27        current = trie
+28
+29        for char in key:
+30            current = current.setdefault(char, {})
+31        current[0] = True
+32
+33    return trie
+
+ + +

Creates a new trie out of a collection of keywords.

+ +

The trie is represented as a sequence of nested dictionaries keyed by either single character +strings, or by 0, which is used to designate that a keyword is in the trie.

+ +
Example:
+ +
+
+
>>> new_trie(["bla", "foo", "blab"])
+{'b': {'l': {'a': {0: True, 'b': {0: True}}}}, 'f': {'o': {'o': {0: True}}}}
+
+
+
+ +
Arguments:
+ +
    +
  • keywords: the keywords to create the trie from.
  • +
+ +
Returns:
+ +
+

The trie corresponding to keywords.

+
+
+ + +
+
+ +
+ + def + in_trie(trie: Dict, key: Sequence[Hashable]) -> Tuple[int, Dict]: + + + +
+ +
36def in_trie(trie: t.Dict, key: key) -> t.Tuple[int, t.Dict]:
+37    """
+38    Checks whether a key is in a trie.
+39
+40    Examples:
+41        >>> in_trie(new_trie(["cat"]), "bob")
+42        (0, {'c': {'a': {'t': {0: True}}}})
+43
+44        >>> in_trie(new_trie(["cat"]), "ca")
+45        (1, {'t': {0: True}})
+46
+47        >>> in_trie(new_trie(["cat"]), "cat")
+48        (2, {0: True})
+49
+50    Args:
+51        trie: the trie to be searched.
+52        key: the target key.
+53
+54    Returns:
+55        A pair `(value, subtrie)`, where `subtrie` is the sub-trie we get at the point where the search stops, and `value`
+56        is either 0 (search was unsuccessful), 1 (`value` is a prefix of a keyword in `trie`) or 2 (`key is in `trie`).
+57    """
+58    if not key:
+59        return (0, trie)
+60
+61    current = trie
+62
+63    for char in key:
+64        if char not in current:
+65            return (0, current)
+66        current = current[char]
+67
+68    if 0 in current:
+69        return (2, current)
+70    return (1, current)
+
+ + +

Checks whether a key is in a trie.

+ +
Examples:
+ +
+
+
>>> in_trie(new_trie(["cat"]), "bob")
+(0, {'c': {'a': {'t': {0: True}}}})
+
+
+ +
+
>>> in_trie(new_trie(["cat"]), "ca")
+(1, {'t': {0: True}})
+
+
+ +
+
>>> in_trie(new_trie(["cat"]), "cat")
+(2, {0: True})
+
+
+
+ +
Arguments:
+ +
    +
  • trie: the trie to be searched.
  • +
  • key: the target key.
  • +
+ +
Returns:
+ +
+

A pair (value, subtrie), where subtrie is the sub-trie we get at the point where the search stops, and value + is either 0 (search was unsuccessful), 1 (value is a prefix of a keyword in trie) or 2 (key is intrie`).

+
+
+ + +
+
+ + \ No newline at end of file diff --git a/pdoc/cli.py b/pdoc/cli.py index 72a986d..1db895e 100755 --- a/pdoc/cli.py +++ b/pdoc/cli.py @@ -26,9 +26,9 @@ if __name__ == "__main__": opts = parser.parse_args() opts.docformat = "google" opts.modules = ["sqlglot"] - opts.footer_text = "Copyright (c) 2022 Toby Mao" + opts.footer_text = "Copyright (c) 2023 Toby Mao" opts.template_directory = Path(__file__).parent.joinpath("templates").absolute() - opts.edit_url = ["sqlglot=https://github.com/tobymao/sqlglot/"] + opts.edit_url = ["sqlglot=https://github.com/tobymao/sqlglot/tree/main/sqlglot/"] with mock.patch("pdoc.__main__.parser", **{"parse_args.return_value": opts}): cli() diff --git a/sqlglot/__init__.py b/sqlglot/__init__.py index 714897f..7b07ae1 100644 --- a/sqlglot/__init__.py +++ b/sqlglot/__init__.py @@ -1,5 +1,6 @@ """ .. include:: ../README.md + ---- """ @@ -39,7 +40,7 @@ if t.TYPE_CHECKING: T = t.TypeVar("T", bound=Expression) -__version__ = "10.6.3" +__version__ = "11.0.1" pretty = False """Whether to format generated SQL by default.""" diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py index 90ae229..6a19b46 100644 --- a/sqlglot/dialects/bigquery.py +++ b/sqlglot/dialects/bigquery.py @@ -2,6 +2,8 @@ from __future__ import annotations +import typing as t + from sqlglot import exp, generator, parser, tokens, transforms from sqlglot.dialects.dialect import ( Dialect, @@ -14,8 +16,10 @@ from sqlglot.dialects.dialect import ( from sqlglot.helper import seq_get from sqlglot.tokens import TokenType +E = t.TypeVar("E", bound=exp.Expression) + -def _date_add(expression_class): +def _date_add(expression_class: t.Type[E]) -> t.Callable[[t.Sequence], E]: def func(args): interval = seq_get(args, 1) return expression_class( @@ -27,26 +31,26 @@ def _date_add(expression_class): return func -def _date_trunc(args): +def _date_trunc(args: t.Sequence) -> exp.Expression: unit = seq_get(args, 1) if isinstance(unit, exp.Column): unit = 
exp.Var(this=unit.name) return exp.DateTrunc(this=seq_get(args, 0), expression=unit) -def _date_add_sql(data_type, kind): +def _date_add_sql( + data_type: str, kind: str +) -> t.Callable[[generator.Generator, exp.Expression], str]: def func(self, expression): this = self.sql(expression, "this") - unit = self.sql(expression, "unit") or "'day'" - expression = self.sql(expression, "expression") - return f"{data_type}_{kind}({this}, INTERVAL {expression} {unit})" + return f"{data_type}_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=expression.args.get('unit') or exp.Literal.string('day')))})" return func -def _derived_table_values_to_unnest(self, expression): +def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: if not isinstance(expression.unnest().parent, exp.From): - expression = transforms.remove_precision_parameterized_types(expression) + expression = t.cast(exp.Values, transforms.remove_precision_parameterized_types(expression)) return self.values_sql(expression) rows = [tuple_exp.expressions for tuple_exp in expression.find_all(exp.Tuple)] structs = [] @@ -60,7 +64,7 @@ def _derived_table_values_to_unnest(self, expression): return self.unnest_sql(unnest_exp) -def _returnsproperty_sql(self, expression): +def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: this = expression.this if isinstance(this, exp.Schema): this = f"{this.this} <{self.expressions(this)}>" @@ -69,8 +73,8 @@ def _returnsproperty_sql(self, expression): return f"RETURNS {this}" -def _create_sql(self, expression): - kind = expression.args.get("kind") +def _create_sql(self: generator.Generator, expression: exp.Create) -> str: + kind = expression.args["kind"] returns = expression.find(exp.ReturnsProperty) if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): expression = expression.copy() @@ -89,6 +93,29 @@ def _create_sql(self, expression): return 
self.create_sql(expression) +def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: + """Remove references to unnest table aliases since bigquery doesn't allow them. + + These are added by the optimizer's qualify_column step. + """ + if isinstance(expression, exp.Select): + unnests = { + unnest.alias + for unnest in expression.args.get("from", exp.From(expressions=[])).expressions + if isinstance(unnest, exp.Unnest) and unnest.alias + } + + if unnests: + expression = expression.copy() + + for select in expression.expressions: + for column in select.find_all(exp.Column): + if column.table in unnests: + column.set("table", None) + + return expression + + class BigQuery(Dialect): unnest_column_only = True time_mapping = { @@ -110,7 +137,7 @@ class BigQuery(Dialect): ] COMMENTS = ["--", "#", ("/*", "*/")] IDENTIFIERS = ["`"] - ESCAPES = ["\\"] + STRING_ESCAPES = ["\\"] HEX_STRINGS = [("0x", ""), ("0X", "")] KEYWORDS = { @@ -190,6 +217,9 @@ class BigQuery(Dialect): exp.GroupConcat: rename_func("STRING_AGG"), exp.ILike: no_ilike_sql, exp.IntDiv: rename_func("DIV"), + exp.Select: transforms.preprocess( + [_unqualify_unnest], transforms.delegate("select_sql") + ), exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", exp.TimeAdd: _date_add_sql("TIME", "ADD"), exp.TimeSub: _date_add_sql("TIME", "SUB"), diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py index 9e8c691..b553df2 100644 --- a/sqlglot/dialects/clickhouse.py +++ b/sqlglot/dialects/clickhouse.py @@ -9,7 +9,7 @@ from sqlglot.parser import parse_var_map from sqlglot.tokens import TokenType -def _lower_func(sql): +def _lower_func(sql: str) -> str: index = sql.index("(") return sql[:index].lower() + sql[index:] diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py index 1b20e0a..176a8ce 100644 --- a/sqlglot/dialects/dialect.py +++ b/sqlglot/dialects/dialect.py @@ -11,6 +11,8 @@ from sqlglot.time import format_time from 
sqlglot.tokens import Tokenizer from sqlglot.trie import new_trie +E = t.TypeVar("E", bound=exp.Expression) + class Dialects(str, Enum): DIALECT = "" @@ -37,14 +39,16 @@ class Dialects(str, Enum): class _Dialect(type): - classes: t.Dict[str, Dialect] = {} + classes: t.Dict[str, t.Type[Dialect]] = {} @classmethod - def __getitem__(cls, key): + def __getitem__(cls, key: str) -> t.Type[Dialect]: return cls.classes[key] @classmethod - def get(cls, key, default=None): + def get( + cls, key: str, default: t.Optional[t.Type[Dialect]] = None + ) -> t.Optional[t.Type[Dialect]]: return cls.classes.get(key, default) def __new__(cls, clsname, bases, attrs): @@ -119,7 +123,7 @@ class Dialect(metaclass=_Dialect): generator_class = None @classmethod - def get_or_raise(cls, dialect): + def get_or_raise(cls, dialect: DialectType) -> t.Type[Dialect]: if not dialect: return cls if isinstance(dialect, _Dialect): @@ -134,7 +138,9 @@ class Dialect(metaclass=_Dialect): return result @classmethod - def format_time(cls, expression): + def format_time( + cls, expression: t.Optional[str | exp.Expression] + ) -> t.Optional[exp.Expression]: if isinstance(expression, str): return exp.Literal.string( format_time( @@ -153,26 +159,28 @@ class Dialect(metaclass=_Dialect): ) return expression - def parse(self, sql, **opts): + def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]: return self.parser(**opts).parse(self.tokenizer.tokenize(sql), sql) - def parse_into(self, expression_type, sql, **opts): + def parse_into( + self, expression_type: exp.IntoType, sql: str, **opts + ) -> t.List[t.Optional[exp.Expression]]: return self.parser(**opts).parse_into(expression_type, self.tokenizer.tokenize(sql), sql) - def generate(self, expression, **opts): + def generate(self, expression: t.Optional[exp.Expression], **opts) -> str: return self.generator(**opts).generate(expression) - def transpile(self, code, **opts): - return self.generate(self.parse(code), **opts) + def transpile(self, sql: 
str, **opts) -> t.List[str]: + return [self.generate(expression, **opts) for expression in self.parse(sql)] @property - def tokenizer(self): + def tokenizer(self) -> Tokenizer: if not hasattr(self, "_tokenizer"): - self._tokenizer = self.tokenizer_class() + self._tokenizer = self.tokenizer_class() # type: ignore return self._tokenizer - def parser(self, **opts): - return self.parser_class( + def parser(self, **opts) -> Parser: + return self.parser_class( # type: ignore **{ "index_offset": self.index_offset, "unnest_column_only": self.unnest_column_only, @@ -182,14 +190,15 @@ class Dialect(metaclass=_Dialect): }, ) - def generator(self, **opts): - return self.generator_class( + def generator(self, **opts) -> Generator: + return self.generator_class( # type: ignore **{ "quote_start": self.quote_start, "quote_end": self.quote_end, "identifier_start": self.identifier_start, "identifier_end": self.identifier_end, - "escape": self.tokenizer_class.ESCAPES[0], + "string_escape": self.tokenizer_class.STRING_ESCAPES[0], + "identifier_escape": self.tokenizer_class.IDENTIFIER_ESCAPES[0], "index_offset": self.index_offset, "time_mapping": self.inverse_time_mapping, "time_trie": self.inverse_time_trie, @@ -202,11 +211,10 @@ class Dialect(metaclass=_Dialect): ) -if t.TYPE_CHECKING: - DialectType = t.Union[str, Dialect, t.Type[Dialect], None] +DialectType = t.Union[str, Dialect, t.Type[Dialect], None] -def rename_func(name): +def rename_func(name: str) -> t.Callable[[Generator, exp.Expression], str]: def _rename(self, expression): args = flatten(expression.args.values()) return f"{self.normalize_func(name)}({self.format_args(*args)})" @@ -214,32 +222,34 @@ def rename_func(name): return _rename -def approx_count_distinct_sql(self, expression): +def approx_count_distinct_sql(self: Generator, expression: exp.ApproxDistinct) -> str: if expression.args.get("accuracy"): self.unsupported("APPROX_COUNT_DISTINCT does not support accuracy") return 
f"APPROX_COUNT_DISTINCT({self.format_args(expression.this)})" -def if_sql(self, expression): +def if_sql(self: Generator, expression: exp.If) -> str: expressions = self.format_args( expression.this, expression.args.get("true"), expression.args.get("false") ) return f"IF({expressions})" -def arrow_json_extract_sql(self, expression): +def arrow_json_extract_sql(self: Generator, expression: exp.JSONExtract | exp.JSONBExtract) -> str: return self.binary(expression, "->") -def arrow_json_extract_scalar_sql(self, expression): +def arrow_json_extract_scalar_sql( + self: Generator, expression: exp.JSONExtractScalar | exp.JSONBExtractScalar +) -> str: return self.binary(expression, "->>") -def inline_array_sql(self, expression): +def inline_array_sql(self: Generator, expression: exp.Array) -> str: return f"[{self.expressions(expression)}]" -def no_ilike_sql(self, expression): +def no_ilike_sql(self: Generator, expression: exp.ILike) -> str: return self.like_sql( exp.Like( this=exp.Lower(this=expression.this), @@ -248,44 +258,44 @@ def no_ilike_sql(self, expression): ) -def no_paren_current_date_sql(self, expression): +def no_paren_current_date_sql(self: Generator, expression: exp.CurrentDate) -> str: zone = self.sql(expression, "this") return f"CURRENT_DATE AT TIME ZONE {zone}" if zone else "CURRENT_DATE" -def no_recursive_cte_sql(self, expression): +def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str: if expression.args.get("recursive"): self.unsupported("Recursive CTEs are unsupported") expression.args["recursive"] = False return self.with_sql(expression) -def no_safe_divide_sql(self, expression): +def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide) -> str: n = self.sql(expression, "this") d = self.sql(expression, "expression") return f"IF({d} <> 0, {n} / {d}, NULL)" -def no_tablesample_sql(self, expression): +def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str: self.unsupported("TABLESAMPLE unsupported") return 
self.sql(expression.this) -def no_pivot_sql(self, expression): +def no_pivot_sql(self: Generator, expression: exp.Pivot) -> str: self.unsupported("PIVOT unsupported") return self.sql(expression) -def no_trycast_sql(self, expression): +def no_trycast_sql(self: Generator, expression: exp.TryCast) -> str: return self.cast_sql(expression) -def no_properties_sql(self, expression): +def no_properties_sql(self: Generator, expression: exp.Properties) -> str: self.unsupported("Properties unsupported") return "" -def str_position_sql(self, expression): +def str_position_sql(self: Generator, expression: exp.StrPosition) -> str: this = self.sql(expression, "this") substr = self.sql(expression, "substr") position = self.sql(expression, "position") @@ -294,13 +304,15 @@ def str_position_sql(self, expression): return f"STRPOS({this}, {substr})" -def struct_extract_sql(self, expression): +def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str: this = self.sql(expression, "this") struct_key = self.sql(exp.Identifier(this=expression.expression, quoted=True)) return f"{this}.{struct_key}" -def var_map_sql(self, expression, map_func_name="MAP"): +def var_map_sql( + self: Generator, expression: exp.Map | exp.VarMap, map_func_name: str = "MAP" +) -> str: keys = expression.args["keys"] values = expression.args["values"] @@ -315,27 +327,33 @@ def var_map_sql(self, expression, map_func_name="MAP"): return f"{map_func_name}({self.format_args(*args)})" -def format_time_lambda(exp_class, dialect, default=None): +def format_time_lambda( + exp_class: t.Type[E], dialect: str, default: t.Optional[bool | str] = None +) -> t.Callable[[t.Sequence], E]: """Helper used for time expressions. - Args - exp_class (Class): the expression class to instantiate - dialect (string): sql dialect - default (Option[bool | str]): the default format, True being time + Args: + exp_class: the expression class to instantiate. + dialect: target sql dialect. 
+ default: the default format, True being time. + + Returns: + A callable that can be used to return the appropriately formatted time expression. """ - def _format_time(args): + def _format_time(args: t.Sequence): return exp_class( this=seq_get(args, 0), format=Dialect[dialect].format_time( - seq_get(args, 1) or (Dialect[dialect].time_format if default is True else default) + seq_get(args, 1) + or (Dialect[dialect].time_format if default is True else default or None) ), ) return _format_time -def create_with_partitions_sql(self, expression): +def create_with_partitions_sql(self: Generator, expression: exp.Create) -> str: """ In Hive and Spark, the PARTITIONED BY property acts as an extension of a table's schema. When the PARTITIONED BY value is an array of column names, they are transformed into a schema. The corresponding @@ -359,19 +377,21 @@ def create_with_partitions_sql(self, expression): return self.create_sql(expression) -def parse_date_delta(exp_class, unit_mapping=None): - def inner_func(args): +def parse_date_delta( + exp_class: t.Type[E], unit_mapping: t.Optional[t.Dict[str, str]] = None +) -> t.Callable[[t.Sequence], E]: + def inner_func(args: t.Sequence) -> E: unit_based = len(args) == 3 this = seq_get(args, 2) if unit_based else seq_get(args, 0) expression = seq_get(args, 1) if unit_based else seq_get(args, 1) unit = seq_get(args, 0) if unit_based else exp.Literal.string("DAY") - unit = unit_mapping.get(unit.name.lower(), unit) if unit_mapping else unit + unit = unit_mapping.get(unit.name.lower(), unit) if unit_mapping else unit # type: ignore return exp_class(this=this, expression=expression, unit=unit) return inner_func -def locate_to_strposition(args): +def locate_to_strposition(args: t.Sequence) -> exp.Expression: return exp.StrPosition( this=seq_get(args, 1), substr=seq_get(args, 0), @@ -379,22 +399,22 @@ def locate_to_strposition(args): ) -def strposition_to_locate_sql(self, expression): +def strposition_to_locate_sql(self: Generator, 
expression: exp.StrPosition) -> str: args = self.format_args( expression.args.get("substr"), expression.this, expression.args.get("position") ) return f"LOCATE({args})" -def timestrtotime_sql(self, expression: exp.TimeStrToTime) -> str: +def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str: return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)" -def datestrtodate_sql(self, expression: exp.DateStrToDate) -> str: +def datestrtodate_sql(self: Generator, expression: exp.DateStrToDate) -> str: return f"CAST({self.sql(expression, 'this')} AS DATE)" -def trim_sql(self, expression): +def trim_sql(self: Generator, expression: exp.Trim) -> str: target = self.sql(expression, "this") trim_type = self.sql(expression, "position") remove_chars = self.sql(expression, "expression") diff --git a/sqlglot/dialects/drill.py b/sqlglot/dialects/drill.py index d0a0251..1730eaf 100644 --- a/sqlglot/dialects/drill.py +++ b/sqlglot/dialects/drill.py @@ -1,6 +1,7 @@ from __future__ import annotations import re +import typing as t from sqlglot import exp, generator, parser, tokens from sqlglot.dialects.dialect import ( @@ -16,35 +17,29 @@ from sqlglot.dialects.dialect import ( ) -def _to_timestamp(args): - # TO_TIMESTAMP accepts either a single double argument or (text, text) - if len(args) == 1 and args[0].is_number: - return exp.UnixToTime.from_arg_list(args) - return format_time_lambda(exp.StrToTime, "drill")(args) - - -def _str_to_time_sql(self, expression): +def _str_to_time_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: return f"STRPTIME({self.sql(expression, 'this')}, {self.format_time(expression)})" -def _ts_or_ds_to_date_sql(self, expression): +def _ts_or_ds_to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str: time_format = self.format_time(expression) if time_format and time_format not in (Drill.time_format, Drill.date_format): return f"CAST({_str_to_time_sql(self, expression)} AS DATE)" return 
f"CAST({self.sql(expression, 'this')} AS DATE)" -def _date_add_sql(kind): - def func(self, expression): +def _date_add_sql(kind: str) -> t.Callable[[generator.Generator, exp.DateAdd | exp.DateSub], str]: + def func(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str: this = self.sql(expression, "this") - unit = expression.text("unit").upper() or "DAY" - expression = self.sql(expression, "expression") - return f"DATE_{kind}({this}, INTERVAL '{expression}' {unit})" + unit = exp.Var(this=expression.text("unit").upper() or "DAY") + return ( + f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})" + ) return func -def if_sql(self, expression): +def if_sql(self: generator.Generator, expression: exp.If) -> str: """ Drill requires backticks around certain SQL reserved words, IF being one of them, This function adds the backticks around the keyword IF. @@ -61,7 +56,7 @@ def if_sql(self, expression): return f"`IF`({expressions})" -def _str_to_date(self, expression): +def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str: this = self.sql(expression, "this") time_format = self.format_time(expression) if time_format == Drill.date_format: @@ -111,7 +106,7 @@ class Drill(Dialect): class Tokenizer(tokens.Tokenizer): QUOTES = ["'"] IDENTIFIERS = ["`"] - ESCAPES = ["\\"] + STRING_ESCAPES = ["\\"] ENCODE = "utf-8" class Parser(parser.Parser): @@ -168,10 +163,10 @@ class Drill(Dialect): exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), exp.TryCast: no_trycast_sql, - exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), INTERVAL '{self.sql(e, 'expression')}' DAY)", + exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})", exp.TsOrDsToDate: _ts_or_ds_to_date_sql, exp.TsOrDiToDi: lambda self, e: 
f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", } - def normalize_func(self, name): + def normalize_func(self, name: str) -> str: return name if re.match(exp.SAFE_IDENTIFIER_RE, name) else f"`{name}`" diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py index 95ff95c..959e5e2 100644 --- a/sqlglot/dialects/duckdb.py +++ b/sqlglot/dialects/duckdb.py @@ -25,10 +25,9 @@ def _str_to_time_sql(self, expression): def _ts_or_ds_add(self, expression): - this = self.sql(expression, "this") - e = self.sql(expression, "expression") + this = expression.args.get("this") unit = self.sql(expression, "unit").strip("'") or "DAY" - return f"CAST({this} AS DATE) + INTERVAL {e} {unit}" + return f"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}" def _ts_or_ds_to_date_sql(self, expression): @@ -40,9 +39,8 @@ def _ts_or_ds_to_date_sql(self, expression): def _date_add(self, expression): this = self.sql(expression, "this") - e = self.sql(expression, "expression") unit = self.sql(expression, "unit").strip("'") or "DAY" - return f"{this} + INTERVAL {e} {unit}" + return f"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}" def _array_sort_sql(self, expression): diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py index f2b6eaa..c558b70 100644 --- a/sqlglot/dialects/hive.py +++ b/sqlglot/dialects/hive.py @@ -172,7 +172,7 @@ class Hive(Dialect): class Tokenizer(tokens.Tokenizer): QUOTES = ["'", '"'] IDENTIFIERS = ["`"] - ESCAPES = ["\\"] + STRING_ESCAPES = ["\\"] ENCODE = "utf-8" KEYWORDS = { diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py index a5bd86b..c2c2c8c 100644 --- a/sqlglot/dialects/mysql.py +++ b/sqlglot/dialects/mysql.py @@ -89,8 +89,9 @@ def _date_add_sql(kind): def func(self, expression): this = self.sql(expression, "this") unit = expression.text("unit").upper() or "DAY" - expression = self.sql(expression, "expression") - return 
f"DATE_{kind}({this}, INTERVAL {expression} {unit})" + return ( + f"DATE_{kind}({this}, {self.sql(exp.Interval(this=expression.expression, unit=unit))})" + ) return func @@ -117,7 +118,7 @@ class MySQL(Dialect): QUOTES = ["'", '"'] COMMENTS = ["--", "#", ("/*", "*/")] IDENTIFIERS = ["`"] - ESCAPES = ["'", "\\"] + STRING_ESCAPES = ["'", "\\"] BIT_STRINGS = [("b'", "'"), ("B'", "'"), ("0b", "")] HEX_STRINGS = [("x'", "'"), ("X'", "'"), ("0x", "")] diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py index 6418032..c709665 100644 --- a/sqlglot/dialects/postgres.py +++ b/sqlglot/dialects/postgres.py @@ -40,8 +40,7 @@ def _date_add_sql(kind): expression = expression.copy() expression.args["is_string"] = True - expression = self.sql(expression) - return f"{this} {kind} INTERVAL {expression} {unit}" + return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}" return func diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py index c3c99eb..813ee5f 100644 --- a/sqlglot/dialects/redshift.py +++ b/sqlglot/dialects/redshift.py @@ -37,11 +37,10 @@ class Redshift(Postgres): return this class Tokenizer(Postgres.Tokenizer): - ESCAPES = ["\\"] + STRING_ESCAPES = ["\\"] KEYWORDS = { **Postgres.Tokenizer.KEYWORDS, # type: ignore - "COPY": TokenType.COMMAND, "ENCODE": TokenType.ENCODE, "GEOMETRY": TokenType.GEOMETRY, "GEOGRAPHY": TokenType.GEOGRAPHY, diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py index 3b83b02..55a6bd3 100644 --- a/sqlglot/dialects/snowflake.py +++ b/sqlglot/dialects/snowflake.py @@ -180,7 +180,7 @@ class Snowflake(Dialect): class Tokenizer(tokens.Tokenizer): QUOTES = ["'", "$$"] - ESCAPES = ["\\", "'"] + STRING_ESCAPES = ["\\", "'"] SINGLE_TOKENS = { **tokens.Tokenizer.SINGLE_TOKENS, @@ -191,6 +191,7 @@ class Snowflake(Dialect): **tokens.Tokenizer.KEYWORDS, "EXCLUDE": TokenType.EXCEPT, "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, + "PUT": TokenType.COMMAND, "RENAME": 
TokenType.REPLACE, "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ, "TIMESTAMP_NTZ": TokenType.TIMESTAMP, @@ -222,6 +223,7 @@ class Snowflake(Dialect): exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})", exp.Trim: lambda self, e: f"TRIM({self.format_args(e.this, e.expression)})", exp.UnixToTime: _unix_to_time_sql, + exp.DayOfWeek: rename_func("DAYOFWEEK"), } TYPE_MAPPING = { @@ -294,3 +296,12 @@ class Snowflake(Dialect): kind = f" {kind_value}" if kind_value else "" this = f" {self.sql(expression, 'this')}" return f"DESCRIBE{kind}{this}" + + def generatedasidentitycolumnconstraint_sql( + self, expression: exp.GeneratedAsIdentityColumnConstraint + ) -> str: + start = expression.args.get("start") + start = f" START {start}" if start else "" + increment = expression.args.get("increment") + increment = f" INCREMENT {increment}" if increment else "" + return f"AUTOINCREMENT{start}{increment}" diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py index 8ef4a87..03ec211 100644 --- a/sqlglot/dialects/spark.py +++ b/sqlglot/dialects/spark.py @@ -157,6 +157,7 @@ class Spark(Hive): TRANSFORMS.pop(exp.ILike) WRAP_DERIVED_VALUES = False + CREATE_FUNCTION_AS = False def cast_sql(self, expression: exp.Cast) -> str: if isinstance(expression.this, exp.Cast) and expression.this.is_type( diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py index 1b39449..a428dd5 100644 --- a/sqlglot/dialects/sqlite.py +++ b/sqlglot/dialects/sqlite.py @@ -49,7 +49,6 @@ class SQLite(Dialect): KEYWORDS = { **tokens.Tokenizer.KEYWORDS, - "AUTOINCREMENT": TokenType.AUTO_INCREMENT, } class Parser(parser.Parser): diff --git a/sqlglot/diff.py b/sqlglot/diff.py index 7d5ec21..7530613 100644 --- a/sqlglot/diff.py +++ b/sqlglot/diff.py @@ -1,5 +1,6 @@ """ .. 
include:: ../posts/sql_diff.md + ---- """ diff --git a/sqlglot/errors.py b/sqlglot/errors.py index b5ef5ad..300c215 100644 --- a/sqlglot/errors.py +++ b/sqlglot/errors.py @@ -7,10 +7,17 @@ from sqlglot.helper import AutoName class ErrorLevel(AutoName): - IGNORE = auto() # Ignore any parser errors - WARN = auto() # Log any parser errors with ERROR level - RAISE = auto() # Collect all parser errors and raise a single exception - IMMEDIATE = auto() # Immediately raise an exception on the first parser error + IGNORE = auto() + """Ignore all errors.""" + + WARN = auto() + """Log all errors.""" + + RAISE = auto() + """Collect all errors and raise a single exception.""" + + IMMEDIATE = auto() + """Immediately raise an exception on the first error found.""" class SqlglotError(Exception): diff --git a/sqlglot/executor/__init__.py b/sqlglot/executor/__init__.py index 67b4b00..c3d2701 100644 --- a/sqlglot/executor/__init__.py +++ b/sqlglot/executor/__init__.py @@ -1,5 +1,6 @@ """ .. include:: ../../posts/python_sql_engine.md + ---- """ diff --git a/sqlglot/executor/python.py b/sqlglot/executor/python.py index 29848c6..de570b0 100644 --- a/sqlglot/executor/python.py +++ b/sqlglot/executor/python.py @@ -408,7 +408,7 @@ def _lambda_sql(self, e: exp.Lambda) -> str: class Python(Dialect): class Tokenizer(tokens.Tokenizer): - ESCAPES = ["\\"] + STRING_ESCAPES = ["\\"] class Generator(generator.Generator): TRANSFORMS = { diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py index 6bb083a..6800cd5 100644 --- a/sqlglot/expressions.py +++ b/sqlglot/expressions.py @@ -6,6 +6,7 @@ Every AST node in SQLGlot is represented by a subclass of `Expression`. This module contains the implementation of all supported `Expression` types. Additionally, it exposes a number of helper functions, which are mainly used to programmatically build SQL expressions, such as `sqlglot.expressions.select`. 
+ ---- """ @@ -137,6 +138,8 @@ class Expression(metaclass=_Expression): return field if isinstance(field, (Identifier, Literal, Var)): return field.this + if isinstance(field, (Star, Null)): + return field.name return "" @property @@ -176,13 +179,11 @@ class Expression(metaclass=_Expression): return self.text("alias") @property - def name(self): + def name(self) -> str: return self.text("this") @property def alias_or_name(self): - if isinstance(self, Null): - return "NULL" return self.alias or self.name @property @@ -589,12 +590,11 @@ class Expression(metaclass=_Expression): return load(obj) -if t.TYPE_CHECKING: - IntoType = t.Union[ - str, - t.Type[Expression], - t.Collection[t.Union[str, t.Type[Expression]]], - ] +IntoType = t.Union[ + str, + t.Type[Expression], + t.Collection[t.Union[str, t.Type[Expression]]], +] class Condition(Expression): @@ -939,7 +939,7 @@ class EncodeColumnConstraint(ColumnConstraintKind): class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind): # this: True -> ALWAYS, this: False -> BY DEFAULT - arg_types = {"this": True, "start": False, "increment": False} + arg_types = {"this": False, "start": False, "increment": False} class NotNullColumnConstraint(ColumnConstraintKind): @@ -2390,7 +2390,7 @@ class Star(Expression): arg_types = {"except": False, "replace": False} @property - def name(self): + def name(self) -> str: return "*" @property @@ -2413,6 +2413,10 @@ class Placeholder(Expression): class Null(Condition): arg_types: t.Dict[str, t.Any] = {} + @property + def name(self) -> str: + return "NULL" + class Boolean(Condition): pass @@ -2644,7 +2648,9 @@ class Div(Binary): class Dot(Binary): - pass + @property + def name(self) -> str: + return self.expression.name class DPipe(Binary): @@ -2961,7 +2967,7 @@ class Cast(Func): arg_types = {"this": True, "to": True} @property - def name(self): + def name(self) -> str: return self.this.name @property @@ -4027,17 +4033,39 @@ def paren(expression) -> Paren: SAFE_IDENTIFIER_RE = 
re.compile(r"^[_a-zA-Z][\w]*$") -def to_identifier(alias, quoted=None) -> t.Optional[Identifier]: - if alias is None: +@t.overload +def to_identifier(name: None, quoted: t.Optional[bool] = None) -> None: + ... + + +@t.overload +def to_identifier(name: str | Identifier, quoted: t.Optional[bool] = None) -> Identifier: + ... + + +def to_identifier(name, quoted=None): + """Builds an identifier. + + Args: + name: The name to turn into an identifier. + quoted: Whether or not force quote the identifier. + + Returns: + The identifier ast node. + """ + + if name is None: return None - if isinstance(alias, Identifier): - identifier = alias - elif isinstance(alias, str): - if quoted is None: - quoted = not re.match(SAFE_IDENTIFIER_RE, alias) - identifier = Identifier(this=alias, quoted=quoted) + + if isinstance(name, Identifier): + identifier = name + elif isinstance(name, str): + identifier = Identifier( + this=name, + quoted=not re.match(SAFE_IDENTIFIER_RE, name) if quoted is None else quoted, + ) else: - raise ValueError(f"Alias needs to be a string or an Identifier, got: {alias.__class__}") + raise ValueError(f"Name needs to be a string or an Identifier, got: {name.__class__}") return identifier @@ -4112,20 +4140,31 @@ def to_column(sql_path: str | Column, **kwargs) -> Column: return Column(this=column_name, table=table_name, **kwargs) -def alias_(expression, alias, table=False, dialect=None, quoted=None, **opts): - """ - Create an Alias expression. +def alias_( + expression: str | Expression, + alias: str | Identifier, + table: bool | t.Sequence[str | Identifier] = False, + quoted: t.Optional[bool] = None, + dialect: DialectType = None, + **opts, +): + """Create an Alias expression. + Example: >>> alias_('foo', 'bar').sql() 'foo AS bar' + >>> alias_('(select 1, 2)', 'bar', table=['a', 'b']).sql() + '(SELECT 1, 2) AS bar(a, b)' + Args: - expression (str | Expression): the SQL code strings to parse. + expression: the SQL code strings to parse. 
If an Expression instance is passed, this is used as-is. - alias (str | Identifier): the alias name to use. If the name has + alias: the alias name to use. If the name has special characters it is quoted. - table (bool): create a table alias, default false - dialect (str): the dialect used to parse the input expression. + table: Whether or not to create a table alias, can also be a list of columns. + quoted: whether or not to quote the alias + dialect: the dialect used to parse the input expression. **opts: other options to use to parse the input expressions. Returns: @@ -4135,8 +4174,14 @@ def alias_(expression, alias, table=False, dialect=None, quoted=None, **opts): alias = to_identifier(alias, quoted=quoted) if table: - expression.set("alias", TableAlias(this=alias)) - return expression + table_alias = TableAlias(this=alias) + exp.set("alias", table_alias) + + if not isinstance(table, bool): + for column in table: + table_alias.append("columns", to_identifier(column, quoted=quoted)) + + return exp # We don't set the "alias" arg for Window expressions, because that would add an IDENTIFIER node in # the AST, representing a "named_window" [1] construct (eg. bigquery). What we want is an ALIAS node diff --git a/sqlglot/generator.py b/sqlglot/generator.py index b95e9bc..0d72fe3 100644 --- a/sqlglot/generator.py +++ b/sqlglot/generator.py @@ -1,6 +1,7 @@ from __future__ import annotations import logging +import re import typing as t from sqlglot import exp @@ -11,6 +12,8 @@ from sqlglot.tokens import TokenType logger = logging.getLogger("sqlglot") +BACKSLASH_RE = re.compile(r"\\(?!b|f|n|r|t|0)") + class Generator: """ @@ -28,7 +31,8 @@ class Generator: identify (bool): if set to True all identifiers will be delimited by the corresponding character. normalize (bool): if set to True all identifiers will lower cased - escape (str): specifies an escape character. Default: '. + string_escape (str): specifies a string escape character. Default: '. 
+ identifier_escape (str): specifies an identifier escape character. Default: ". pad (int): determines padding in a formatted string. Default: 2. indent (int): determines the size of indentation in a formatted string. Default: 4. unnest_column_only (bool): if true unnest table aliases are considered only as column aliases @@ -85,6 +89,9 @@ class Generator: # Wrap derived values in parens, usually standard but spark doesn't support it WRAP_DERIVED_VALUES = True + # Whether or not create function uses an AS before the def. + CREATE_FUNCTION_AS = True + TYPE_MAPPING = { exp.DataType.Type.NCHAR: "CHAR", exp.DataType.Type.NVARCHAR: "VARCHAR", @@ -154,7 +161,8 @@ class Generator: "identifier_end", "identify", "normalize", - "escape", + "string_escape", + "identifier_escape", "pad", "index_offset", "unnest_column_only", @@ -167,6 +175,7 @@ class Generator: "_indent", "_replace_backslash", "_escaped_quote_end", + "_escaped_identifier_end", "_leading_comma", "_max_text_width", "_comments", @@ -183,7 +192,8 @@ class Generator: identifier_end=None, identify=False, normalize=False, - escape=None, + string_escape=None, + identifier_escape=None, pad=2, indent=2, index_offset=0, @@ -208,7 +218,8 @@ class Generator: self.identifier_end = identifier_end or '"' self.identify = identify self.normalize = normalize - self.escape = escape or "'" + self.string_escape = string_escape or "'" + self.identifier_escape = identifier_escape or '"' self.pad = pad self.index_offset = index_offset self.unnest_column_only = unnest_column_only @@ -219,8 +230,9 @@ class Generator: self.max_unsupported = max_unsupported self.null_ordering = null_ordering self._indent = indent - self._replace_backslash = self.escape == "\\" - self._escaped_quote_end = self.escape + self.quote_end + self._replace_backslash = self.string_escape == "\\" + self._escaped_quote_end = self.string_escape + self.quote_end + self._escaped_identifier_end = self.identifier_escape + self.identifier_end self._leading_comma = 
leading_comma self._max_text_width = max_text_width self._comments = comments @@ -441,6 +453,9 @@ class Generator: def generatedasidentitycolumnconstraint_sql( self, expression: exp.GeneratedAsIdentityColumnConstraint ) -> str: + this = "" + if expression.this is not None: + this = " ALWAYS " if expression.this else " BY DEFAULT " start = expression.args.get("start") start = f"START WITH {start}" if start else "" increment = expression.args.get("increment") @@ -449,9 +464,7 @@ class Generator: if start or increment: sequence_opts = f"{start} {increment}" sequence_opts = f" ({sequence_opts.strip()})" - return ( - f"GENERATED {'ALWAYS' if expression.this else 'BY DEFAULT'} AS IDENTITY{sequence_opts}" - ) + return f"GENERATED{this}AS IDENTITY{sequence_opts}" def notnullcolumnconstraint_sql(self, expression: exp.NotNullColumnConstraint) -> str: return f"{'' if expression.args.get('allow_null') else 'NOT '}NULL" @@ -496,7 +509,12 @@ class Generator: properties_sql = self.sql(properties_exp, "properties") begin = " BEGIN" if expression.args.get("begin") else "" expression_sql = self.sql(expression, "expression") - expression_sql = f" AS{begin}{self.sep()}{expression_sql}" if expression_sql else "" + if expression_sql: + expression_sql = f"{begin}{self.sep()}{expression_sql}" + + if self.CREATE_FUNCTION_AS or kind != "FUNCTION": + expression_sql = f" AS{expression_sql}" + temporary = " TEMPORARY" if expression.args.get("temporary") else "" transient = ( " TRANSIENT" if self.CREATE_TRANSIENT and expression.args.get("transient") else "" @@ -701,6 +719,7 @@ class Generator: def identifier_sql(self, expression: exp.Identifier) -> str: text = expression.name text = text.lower() if self.normalize else text + text = text.replace(self.identifier_end, self._escaped_identifier_end) if expression.args.get("quoted") or self.identify: text = f"{self.identifier_start}{text}{self.identifier_end}" return text @@ -1121,7 +1140,7 @@ class Generator: text = expression.this or "" if 
expression.is_string: if self._replace_backslash: - text = text.replace("\\", "\\\\") + text = BACKSLASH_RE.sub(r"\\\\", text) text = text.replace(self.quote_end, self._escaped_quote_end) if self.pretty: text = text.replace("\n", self.SENTINEL_LINE_BREAK) @@ -1486,9 +1505,16 @@ class Generator: return f"(SELECT {self.sql(unnest)})" def interval_sql(self, expression: exp.Interval) -> str: - this = self.sql(expression, "this") - this = f" {this}" if this else "" - unit = self.sql(expression, "unit") + this = expression.args.get("this") + if this: + this = ( + f" {this}" + if isinstance(this, exp.Literal) or isinstance(this, exp.Paren) + else f" ({this})" + ) + else: + this = "" + unit = expression.args.get("unit") unit = f" {unit}" if unit else "" return f"INTERVAL{this}{unit}" diff --git a/sqlglot/lineage.py b/sqlglot/lineage.py index a39ad8c..908f126 100644 --- a/sqlglot/lineage.py +++ b/sqlglot/lineage.py @@ -6,6 +6,7 @@ from dataclasses import dataclass, field from sqlglot import Schema, exp, maybe_parse from sqlglot.optimizer import Scope, build_scope, optimize +from sqlglot.optimizer.expand_laterals import expand_laterals from sqlglot.optimizer.qualify_columns import qualify_columns from sqlglot.optimizer.qualify_tables import qualify_tables @@ -38,7 +39,7 @@ def lineage( sql: str | exp.Expression, schema: t.Optional[t.Dict | Schema] = None, sources: t.Optional[t.Dict[str, str | exp.Subqueryable]] = None, - rules: t.Sequence[t.Callable] = (qualify_tables, qualify_columns), + rules: t.Sequence[t.Callable] = (qualify_tables, qualify_columns, expand_laterals), dialect: DialectType = None, ) -> Node: """Build the lineage graph for a column of a SQL query. 
diff --git a/sqlglot/optimizer/annotate_types.py b/sqlglot/optimizer/annotate_types.py index bfb2bb8..66f97a9 100644 --- a/sqlglot/optimizer/annotate_types.py +++ b/sqlglot/optimizer/annotate_types.py @@ -255,12 +255,23 @@ class TypeAnnotator: for name, source in scope.sources.items(): if not isinstance(source, Scope): continue - if isinstance(source.expression, exp.Values): + if isinstance(source.expression, exp.UDTF): + values = [] + + if isinstance(source.expression, exp.Lateral): + if isinstance(source.expression.this, exp.Explode): + values = [source.expression.this.this] + else: + values = source.expression.expressions[0].expressions + + if not values: + continue + selects[name] = { alias: column for alias, column in zip( source.expression.alias_column_names, - source.expression.expressions[0].expressions, + values, ) } else: @@ -272,7 +283,7 @@ class TypeAnnotator: source = scope.sources.get(col.table) if isinstance(source, exp.Table): col.type = self.schema.get_column_type(source, col) - elif source: + elif source and col.table in selects: col.type = selects[col.table][col.name].type # Then (possibly) annotate the remaining expressions in the scope self._maybe_annotate(scope.expression) diff --git a/sqlglot/optimizer/expand_laterals.py b/sqlglot/optimizer/expand_laterals.py new file mode 100644 index 0000000..59f3fec --- /dev/null +++ b/sqlglot/optimizer/expand_laterals.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +import typing as t + +from sqlglot import exp + + +def expand_laterals(expression: exp.Expression) -> exp.Expression: + """ + Expand lateral column alias references. + + This assumes `qualify_columns` as already run. 
+ + Example: + >>> import sqlglot + >>> sql = "SELECT x.a + 1 AS b, b + 1 AS c FROM x" + >>> expression = sqlglot.parse_one(sql) + >>> expand_laterals(expression).sql() + 'SELECT x.a + 1 AS b, x.a + 1 + 1 AS c FROM x' + + Args: + expression: expression to optimize + Returns: + optimized expression + """ + for select in expression.find_all(exp.Select): + alias_to_expression: t.Dict[str, exp.Expression] = {} + for projection in select.expressions: + for column in projection.find_all(exp.Column): + if not column.table and column.name in alias_to_expression: + column.replace(alias_to_expression[column.name].copy()) + if isinstance(projection, exp.Alias): + alias_to_expression[projection.alias] = projection.this + return expression diff --git a/sqlglot/optimizer/optimizer.py b/sqlglot/optimizer/optimizer.py index 766e059..96fd56b 100644 --- a/sqlglot/optimizer/optimizer.py +++ b/sqlglot/optimizer/optimizer.py @@ -4,6 +4,7 @@ from sqlglot.optimizer.canonicalize import canonicalize from sqlglot.optimizer.eliminate_ctes import eliminate_ctes from sqlglot.optimizer.eliminate_joins import eliminate_joins from sqlglot.optimizer.eliminate_subqueries import eliminate_subqueries +from sqlglot.optimizer.expand_laterals import expand_laterals from sqlglot.optimizer.expand_multi_table_selects import expand_multi_table_selects from sqlglot.optimizer.isolate_table_selects import isolate_table_selects from sqlglot.optimizer.lower_identities import lower_identities @@ -12,7 +13,7 @@ from sqlglot.optimizer.normalize import normalize from sqlglot.optimizer.optimize_joins import optimize_joins from sqlglot.optimizer.pushdown_predicates import pushdown_predicates from sqlglot.optimizer.pushdown_projections import pushdown_projections -from sqlglot.optimizer.qualify_columns import qualify_columns +from sqlglot.optimizer.qualify_columns import qualify_columns, validate_qualify_columns from sqlglot.optimizer.qualify_tables import qualify_tables from sqlglot.optimizer.unnest_subqueries import 
unnest_subqueries from sqlglot.schema import ensure_schema @@ -22,6 +23,8 @@ RULES = ( qualify_tables, isolate_table_selects, qualify_columns, + expand_laterals, + validate_qualify_columns, pushdown_projections, normalize, unnest_subqueries, diff --git a/sqlglot/optimizer/pushdown_projections.py b/sqlglot/optimizer/pushdown_projections.py index a73647c..54c5021 100644 --- a/sqlglot/optimizer/pushdown_projections.py +++ b/sqlglot/optimizer/pushdown_projections.py @@ -7,7 +7,7 @@ from sqlglot.optimizer.scope import Scope, traverse_scope SELECT_ALL = object() # Selection to use if selection list is empty -DEFAULT_SELECTION = alias("1", "_") +DEFAULT_SELECTION = lambda: alias("1", "_") def pushdown_projections(expression): @@ -93,7 +93,7 @@ def _remove_unused_selections(scope, parent_selections): # If there are no remaining selections, just select a single constant if not new_selections: - new_selections.append(DEFAULT_SELECTION.copy()) + new_selections.append(DEFAULT_SELECTION()) scope.expression.set("expressions", new_selections) if removed: @@ -106,5 +106,5 @@ def _remove_indexed_selections(scope, indexes_to_remove): selection for i, selection in enumerate(scope.selects) if i not in indexes_to_remove ] if not new_selections: - new_selections.append(DEFAULT_SELECTION.copy()) + new_selections.append(DEFAULT_SELECTION()) scope.expression.set("expressions", new_selections) diff --git a/sqlglot/optimizer/qualify_columns.py b/sqlglot/optimizer/qualify_columns.py index 54425a8..ab13d01 100644 --- a/sqlglot/optimizer/qualify_columns.py +++ b/sqlglot/optimizer/qualify_columns.py @@ -37,11 +37,24 @@ def qualify_columns(expression, schema): if not isinstance(scope.expression, exp.UDTF): _expand_stars(scope, resolver) _qualify_outputs(scope) - _check_unknown_tables(scope) return expression +def validate_qualify_columns(expression): + """Raise an `OptimizeError` if any columns aren't qualified""" + unqualified_columns = [] + for scope in traverse_scope(expression): + if 
isinstance(scope.expression, exp.Select): + unqualified_columns.extend(scope.unqualified_columns) + if scope.external_columns and not scope.is_correlated_subquery: + raise OptimizeError(f"Unknown table: {scope.external_columns[0].table}") + + if unqualified_columns: + raise OptimizeError(f"Ambiguous columns: {unqualified_columns}") + return expression + + def _pop_table_column_aliases(derived_tables): """ Remove table column aliases. @@ -199,10 +212,6 @@ def _qualify_columns(scope, resolver): if not column_table: column_table = resolver.get_table(column_name) - if not scope.is_subquery and not scope.is_udtf: - if column_table is None: - raise OptimizeError(f"Ambiguous column: {column_name}") - # column_table can be a '' because bigquery unnest has no table alias if column_table: column.set("table", exp.to_identifier(column_table)) @@ -231,10 +240,8 @@ def _qualify_columns(scope, resolver): for column in columns_missing_from_scope: column_table = resolver.get_table(column.name) - if column_table is None: - raise OptimizeError(f"Ambiguous column: {column.name}") - - column.set("table", exp.to_identifier(column_table)) + if column_table: + column.set("table", exp.to_identifier(column_table)) def _expand_stars(scope, resolver): @@ -322,11 +329,6 @@ def _qualify_outputs(scope): scope.expression.set("expressions", new_selections) -def _check_unknown_tables(scope): - if scope.external_columns and not scope.is_udtf and not scope.is_correlated_subquery: - raise OptimizeError(f"Unknown table: {scope.external_columns[0].text('table')}") - - class _Resolver: """ Helper for resolving columns. 
diff --git a/sqlglot/optimizer/qualify_tables.py b/sqlglot/optimizer/qualify_tables.py index 5d8e0d9..65593bd 100644 --- a/sqlglot/optimizer/qualify_tables.py +++ b/sqlglot/optimizer/qualify_tables.py @@ -2,7 +2,7 @@ import itertools from sqlglot import alias, exp from sqlglot.helper import csv_reader -from sqlglot.optimizer.scope import traverse_scope +from sqlglot.optimizer.scope import Scope, traverse_scope def qualify_tables(expression, db=None, catalog=None, schema=None): @@ -25,6 +25,8 @@ def qualify_tables(expression, db=None, catalog=None, schema=None): """ sequence = itertools.count() + next_name = lambda: f"_q_{next(sequence)}" + for scope in traverse_scope(expression): for derived_table in scope.ctes + scope.derived_tables: if not derived_table.args.get("alias"): @@ -46,7 +48,7 @@ def qualify_tables(expression, db=None, catalog=None, schema=None): source = source.replace( alias( source.copy(), - source.this if identifier else f"_q_{next(sequence)}", + source.this if identifier else next_name(), table=True, ) ) @@ -58,5 +60,12 @@ def qualify_tables(expression, db=None, catalog=None, schema=None): schema.add_table( source, {k: type(v).__name__ for k, v in zip(header, columns)} ) + elif isinstance(source, Scope) and source.is_udtf: + udtf = source.expression + table_alias = udtf.args.get("alias") or exp.TableAlias(this=next_name()) + udtf.set("alias", table_alias) + + if not table_alias.name: + table_alias.set("this", next_name()) return expression diff --git a/sqlglot/optimizer/scope.py b/sqlglot/optimizer/scope.py index badbb87..8565c64 100644 --- a/sqlglot/optimizer/scope.py +++ b/sqlglot/optimizer/scope.py @@ -237,6 +237,8 @@ class Scope: ancestor = column.find_ancestor(exp.Qualify, exp.Order, exp.Having, exp.Hint) if ( not ancestor + # Window functions can have an ORDER BY clause + or not isinstance(ancestor.parent, exp.Select) or column.table or (column.name not in named_selects and not isinstance(ancestor, exp.Hint)) ): @@ -479,7 +481,7 @@ def 
_traverse_scope(scope): elif isinstance(scope.expression, exp.Union): yield from _traverse_union(scope) elif isinstance(scope.expression, exp.UDTF): - pass + _set_udtf_scope(scope) elif isinstance(scope.expression, exp.Subquery): yield from _traverse_subqueries(scope) else: @@ -509,6 +511,22 @@ def _traverse_union(scope): scope.union_scopes = [left, right] +def _set_udtf_scope(scope): + parent = scope.expression.parent + from_ = parent.args.get("from") + + if not from_: + return + + for table in from_.expressions: + if isinstance(table, exp.Table): + scope.tables.append(table) + elif isinstance(table, exp.Subquery): + scope.subqueries.append(table) + _add_table_sources(scope) + _traverse_subqueries(scope) + + def _traverse_derived_tables(derived_tables, scope, scope_type): sources = {} is_cte = scope_type == ScopeType.CTE diff --git a/sqlglot/parser.py b/sqlglot/parser.py index e2b2c54..579c2ce 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -194,6 +194,7 @@ class Parser(metaclass=_Parser): TokenType.INTERVAL, TokenType.LAZY, TokenType.LEADING, + TokenType.LEFT, TokenType.LOCAL, TokenType.MATERIALIZED, TokenType.MERGE, @@ -208,6 +209,7 @@ class Parser(metaclass=_Parser): TokenType.PRECEDING, TokenType.RANGE, TokenType.REFERENCES, + TokenType.RIGHT, TokenType.ROW, TokenType.ROWS, TokenType.SCHEMA, @@ -237,8 +239,10 @@ class Parser(metaclass=_Parser): TABLE_ALIAS_TOKENS = ID_VAR_TOKENS - { TokenType.APPLY, + TokenType.LEFT, TokenType.NATURAL, TokenType.OFFSET, + TokenType.RIGHT, TokenType.WINDOW, } @@ -258,6 +262,8 @@ class Parser(metaclass=_Parser): TokenType.IDENTIFIER, TokenType.INDEX, TokenType.ISNULL, + TokenType.ILIKE, + TokenType.LIKE, TokenType.MERGE, TokenType.OFFSET, TokenType.PRIMARY_KEY, @@ -971,13 +977,14 @@ class Parser(metaclass=_Parser): if create_token.token_type in (TokenType.FUNCTION, TokenType.PROCEDURE): this = self._parse_user_defined_function(kind=create_token.token_type) properties = self._parse_properties() - if 
self._match(TokenType.ALIAS): - begin = self._match(TokenType.BEGIN) - return_ = self._match_text_seq("RETURN") - expression = self._parse_statement() - if return_: - expression = self.expression(exp.Return, this=expression) + self._match(TokenType.ALIAS) + begin = self._match(TokenType.BEGIN) + return_ = self._match_text_seq("RETURN") + expression = self._parse_statement() + + if return_: + expression = self.expression(exp.Return, this=expression) elif create_token.token_type == TokenType.INDEX: this = self._parse_index() elif create_token.token_type in ( @@ -2163,7 +2170,9 @@ class Parser(metaclass=_Parser): ) -> t.Optional[exp.Expression]: if self._match(TokenType.TOP if top else TokenType.LIMIT): limit_paren = self._match(TokenType.L_PAREN) - limit_exp = self.expression(exp.Limit, this=this, expression=self._parse_number()) + limit_exp = self.expression( + exp.Limit, this=this, expression=self._parse_number() if top else self._parse_term() + ) if limit_paren: self._match_r_paren() @@ -2740,8 +2749,23 @@ class Parser(metaclass=_Parser): kind: exp.Expression - if self._match(TokenType.AUTO_INCREMENT): - kind = exp.AutoIncrementColumnConstraint() + if self._match_set((TokenType.AUTO_INCREMENT, TokenType.IDENTITY)): + start = None + increment = None + + if self._match(TokenType.L_PAREN, advance=False): + args = self._parse_wrapped_csv(self._parse_bitwise) + start = seq_get(args, 0) + increment = seq_get(args, 1) + elif self._match_text_seq("START"): + start = self._parse_bitwise() + self._match_text_seq("INCREMENT") + increment = self._parse_bitwise() + + if start and increment: + kind = exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment) + else: + kind = exp.AutoIncrementColumnConstraint() elif self._match(TokenType.CHECK): constraint = self._parse_wrapped(self._parse_conjunction) kind = self.expression(exp.CheckColumnConstraint, this=constraint) @@ -3294,8 +3318,8 @@ class Parser(metaclass=_Parser): if not self._match(TokenType.EXCEPT): 
return None if self._match(TokenType.L_PAREN, advance=False): - return self._parse_wrapped_id_vars() - return self._parse_csv(self._parse_id_var) + return self._parse_wrapped_csv(self._parse_column) + return self._parse_csv(self._parse_column) def _parse_replace(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]: if not self._match(TokenType.REPLACE): @@ -3442,7 +3466,7 @@ class Parser(metaclass=_Parser): def _parse_alter(self) -> t.Optional[exp.Expression]: if not self._match(TokenType.TABLE): - return None + return self._parse_as_command(self._prev) exists = self._parse_exists() this = self._parse_table(schema=True) diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index e95057a..8cf17a7 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -357,7 +357,8 @@ class _Tokenizer(type): klass._HEX_STRINGS = cls._delimeter_list_to_dict(klass.HEX_STRINGS) klass._BYTE_STRINGS = cls._delimeter_list_to_dict(klass.BYTE_STRINGS) klass._IDENTIFIERS = cls._delimeter_list_to_dict(klass.IDENTIFIERS) - klass._ESCAPES = set(klass.ESCAPES) + klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) + klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) klass._COMMENTS = dict( (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) for comment in klass.COMMENTS @@ -429,9 +430,13 @@ class Tokenizer(metaclass=_Tokenizer): IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] - ESCAPES = ["'"] + STRING_ESCAPES = ["'"] - _ESCAPES: t.Set[str] = set() + _STRING_ESCAPES: t.Set[str] = set() + + IDENTIFIER_ESCAPES = ['"'] + + _IDENTIFIER_ESCAPES: t.Set[str] = set() KEYWORDS = { **{ @@ -469,6 +474,7 @@ class Tokenizer(metaclass=_Tokenizer): "ASC": TokenType.ASC, "AS": TokenType.ALIAS, "AT TIME ZONE": TokenType.AT_TIME_ZONE, + "AUTOINCREMENT": TokenType.AUTO_INCREMENT, "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, "BEGIN": TokenType.BEGIN, "BETWEEN": TokenType.BETWEEN, @@ -691,6 +697,7 @@ class Tokenizer(metaclass=_Tokenizer): "ALTER VIEW": TokenType.COMMAND, "ANALYZE": 
TokenType.COMMAND, "CALL": TokenType.COMMAND, + "COPY": TokenType.COMMAND, "EXPLAIN": TokenType.COMMAND, "OPTIMIZE": TokenType.COMMAND, "PREPARE": TokenType.COMMAND, @@ -744,7 +751,7 @@ class Tokenizer(metaclass=_Tokenizer): ) def __init__(self) -> None: - self._replace_backslash = "\\" in self._ESCAPES + self._replace_backslash = "\\" in self._STRING_ESCAPES self.reset() def reset(self) -> None: @@ -1046,12 +1053,25 @@ class Tokenizer(metaclass=_Tokenizer): return True def _scan_identifier(self, identifier_end: str) -> None: - while self._peek != identifier_end: + text = "" + identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES + + while True: if self._end: raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}") + self._advance() - self._advance() - self._add(TokenType.IDENTIFIER, self._text[1:-1]) + if self._char == identifier_end: + if identifier_end_is_escape and self._peek == identifier_end: + text += identifier_end # type: ignore + self._advance() + continue + + break + + text += self._char # type: ignore + + self._add(TokenType.IDENTIFIER, text) def _scan_var(self) -> None: while True: @@ -1072,9 +1092,9 @@ class Tokenizer(metaclass=_Tokenizer): while True: if ( - self._char in self._ESCAPES + self._char in self._STRING_ESCAPES and self._peek - and (self._peek == delimiter or self._peek in self._ESCAPES) + and (self._peek == delimiter or self._peek in self._STRING_ESCAPES) ): text += self._peek self._advance(2) diff --git a/tests/dialects/test_databricks.py b/tests/dialects/test_databricks.py index 1d80dc0..5ae5c6f 100644 --- a/tests/dialects/test_databricks.py +++ b/tests/dialects/test_databricks.py @@ -4,6 +4,9 @@ from tests.dialects.test_dialect import Validator class TestDatabricks(Validator): dialect = "databricks" + def test_databricks(self): + self.validate_identity("CREATE FUNCTION a.b(x INT) RETURNS INT RETURN x + 1") + def test_datediff(self): self.validate_all( "SELECT DATEDIFF(year, 'start', 'end')", diff 
--git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py index a456415..442fbbb 100644 --- a/tests/dialects/test_dialect.py +++ b/tests/dialects/test_dialect.py @@ -508,7 +508,7 @@ class TestDialect(Validator): }, write={ "bigquery": "DATE_ADD(x, INTERVAL 1 'day')", - "drill": "DATE_ADD(x, INTERVAL '1' DAY)", + "drill": "DATE_ADD(x, INTERVAL 1 DAY)", "duckdb": "x + INTERVAL 1 day", "hive": "DATE_ADD(x, 1)", "mysql": "DATE_ADD(x, INTERVAL 1 DAY)", @@ -525,7 +525,7 @@ class TestDialect(Validator): "DATE_ADD(x, 1)", write={ "bigquery": "DATE_ADD(x, INTERVAL 1 'day')", - "drill": "DATE_ADD(x, INTERVAL '1' DAY)", + "drill": "DATE_ADD(x, INTERVAL 1 DAY)", "duckdb": "x + INTERVAL 1 DAY", "hive": "DATE_ADD(x, 1)", "mysql": "DATE_ADD(x, INTERVAL 1 DAY)", @@ -628,7 +628,7 @@ class TestDialect(Validator): self.validate_all( "TS_OR_DS_ADD('2021-02-01', 1, 'DAY')", write={ - "drill": "DATE_ADD(CAST('2021-02-01' AS DATE), INTERVAL '1' DAY)", + "drill": "DATE_ADD(CAST('2021-02-01' AS DATE), INTERVAL 1 DAY)", "duckdb": "CAST('2021-02-01' AS DATE) + INTERVAL 1 DAY", "hive": "DATE_ADD('2021-02-01', 1)", "presto": "DATE_ADD('DAY', 1, DATE_PARSE(SUBSTR('2021-02-01', 1, 10), '%Y-%m-%d'))", @@ -638,7 +638,7 @@ class TestDialect(Validator): self.validate_all( "DATE_ADD(CAST('2020-01-01' AS DATE), 1)", write={ - "drill": "DATE_ADD(CAST('2020-01-01' AS DATE), INTERVAL '1' DAY)", + "drill": "DATE_ADD(CAST('2020-01-01' AS DATE), INTERVAL 1 DAY)", "duckdb": "CAST('2020-01-01' AS DATE) + INTERVAL 1 DAY", "hive": "DATE_ADD(CAST('2020-01-01' AS DATE), 1)", "presto": "DATE_ADD('day', 1, CAST('2020-01-01' AS DATE))", diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py index f01a604..e5cb833 100644 --- a/tests/dialects/test_duckdb.py +++ b/tests/dialects/test_duckdb.py @@ -343,6 +343,10 @@ class TestDuckDB(Validator): }, ) + self.validate_all( + "CAST(x AS DATE) + INTERVAL (7 * -1) DAY", read={"spark": "DATE_SUB(x, 7)"} + ) + def test_bool_or(self): 
self.validate_all( "SELECT a, LOGICAL_OR(b) FROM table GROUP BY a", diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py index 1f35d1d..42d9943 100644 --- a/tests/dialects/test_hive.py +++ b/tests/dialects/test_hive.py @@ -287,7 +287,7 @@ class TestHive(Validator): self.validate_all( "DATE_SUB('2020-01-01', 1)", write={ - "duckdb": "CAST('2020-01-01' AS DATE) + INTERVAL 1 * -1 DAY", + "duckdb": "CAST('2020-01-01' AS DATE) + INTERVAL (1 * -1) DAY", "presto": "DATE_ADD('DAY', 1 * -1, DATE_PARSE(SUBSTR('2020-01-01', 1, 10), '%Y-%m-%d'))", "hive": "DATE_ADD('2020-01-01', 1 * -1)", "spark": "DATE_ADD('2020-01-01', 1 * -1)", diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py index f3e8e24..201cc4e 100644 --- a/tests/dialects/test_snowflake.py +++ b/tests/dialects/test_snowflake.py @@ -6,6 +6,43 @@ class TestSnowflake(Validator): dialect = "snowflake" def test_snowflake(self): + self.validate_identity("SELECT REGEXP_LIKE(a, b, c)") + self.validate_identity("PUT file:///dir/tmp.csv @%table") + self.validate_identity("CREATE TABLE foo (bar FLOAT AUTOINCREMENT START 0 INCREMENT 1)") + self.validate_identity( + 'COPY INTO NEW_TABLE ("foo", "bar") FROM (SELECT $1, $2, $3, $4 FROM @%old_table)' + ) + + self.validate_all( + "CREATE OR REPLACE TEMPORARY TABLE x (y NUMBER IDENTITY(0, 1))", + write={ + "snowflake": "CREATE OR REPLACE TEMPORARY TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)", + }, + ) + self.validate_all( + "CREATE TEMPORARY TABLE x (y NUMBER AUTOINCREMENT(0, 1))", + write={ + "snowflake": "CREATE TEMPORARY TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)", + }, + ) + self.validate_all( + "CREATE TABLE x (y NUMBER IDENTITY START 0 INCREMENT 1)", + write={ + "snowflake": "CREATE TABLE x (y DECIMAL AUTOINCREMENT START 0 INCREMENT 1)", + }, + ) + self.validate_all( + "ALTER TABLE foo ADD COLUMN id INT identity(1, 1)", + write={ + "snowflake": "ALTER TABLE foo ADD COLUMN id INT AUTOINCREMENT START 1 INCREMENT 
1", + }, + ) + self.validate_all( + "SELECT DAYOFWEEK('2016-01-02T23:39:20.123-07:00'::TIMESTAMP)", + write={ + "snowflake": "SELECT DAYOFWEEK(CAST('2016-01-02T23:39:20.123-07:00' AS TIMESTAMPNTZ))", + }, + ) self.validate_all( "SELECT * FROM xxx WHERE col ilike '%Don''t%'", write={ @@ -165,10 +202,10 @@ class TestSnowflake(Validator): self.validate_all( r"SELECT $$a ' \ \t \x21 z $ $$", write={ - "snowflake": r"SELECT 'a \' \\ \\t \\x21 z $ '", + "snowflake": r"SELECT 'a \' \\ \t \\x21 z $ '", }, ) - self.validate_identity("SELECT REGEXP_LIKE(a, b, c)") + self.validate_identity(r"REGEXP_REPLACE('target', 'pattern', '\n')") self.validate_all( "SELECT RLIKE(a, b)", write={ @@ -253,6 +290,8 @@ class TestSnowflake(Validator): ) def test_timestamps(self): + self.validate_identity("SELECT EXTRACT(month FROM a)") + self.validate_all( "SELECT CAST(a AS TIMESTAMP)", write={ @@ -277,7 +316,6 @@ class TestSnowflake(Validator): "snowflake": "SELECT CAST(a AS TIMESTAMPLTZ)", }, ) - self.validate_identity("SELECT EXTRACT(month FROM a)") self.validate_all( "SELECT EXTRACT('month', a)", write={ @@ -313,6 +351,8 @@ class TestSnowflake(Validator): def test_semi_structured_types(self): self.validate_identity("SELECT CAST(a AS VARIANT)") + self.validate_identity("SELECT CAST(a AS ARRAY)") + self.validate_all( "SELECT a::VARIANT", write={ @@ -320,7 +360,6 @@ class TestSnowflake(Validator): "tsql": "SELECT CAST(a AS SQL_VARIANT)", }, ) - self.validate_identity("SELECT CAST(a AS ARRAY)") self.validate_all( "ARRAY_CONSTRUCT(0, 1, 2)", write={ @@ -343,6 +382,7 @@ class TestSnowflake(Validator): "CREATE TABLE a (x DATE, y BIGINT) WITH (PARTITION BY (x), integration='q', auto_refresh=TRUE, file_format=(type = parquet))" ) self.validate_identity("CREATE MATERIALIZED VIEW a COMMENT='...' 
AS SELECT 1 FROM x") + self.validate_all( "CREATE OR REPLACE TRANSIENT TABLE a (id INT)", read={ diff --git a/tests/fixtures/identity.sql b/tests/fixtures/identity.sql index 5a4871d..b3f546b 100644 --- a/tests/fixtures/identity.sql +++ b/tests/fixtures/identity.sql @@ -17,6 +17,7 @@ SUM(CASE WHEN x > 1 THEN 1 ELSE 0 END) / y '\x' "x" "" +"""x""" N'abc' x x % 1 @@ -101,6 +102,8 @@ SPLIT(SPLIT(referrer, 'utm_source=')[OFFSET(1)], "&")[OFFSET(0)] x[ORDINAL(1)][SAFE_OFFSET(2)] x GLOB '??-*' x GLOB y +LIKE(x, 'z') +ILIKE(x, 'z') x LIKE SUBSTR('abc', 1, 1) x LIKE y x LIKE a.y @@ -113,7 +116,7 @@ INTERVAL '1' day INTERVAL '1' MONTH INTERVAL '1 day' INTERVAL 2 months -INTERVAL 1 + 3 DAYS +INTERVAL (1 + 3) DAYS CAST('45' AS INTERVAL DAYS) TIMESTAMP_DIFF(CURRENT_TIMESTAMP(), 1, DAY) DATETIME_DIFF(CURRENT_DATE, 1, DAY) @@ -221,6 +224,7 @@ SELECT JSON_EXTRACT(x, '$.name') SELECT JSON_EXTRACT_SCALAR(x, '$.name') SELECT x LIKE '%x%' FROM test SELECT * FROM test LIMIT 100 +SELECT * FROM test LIMIT 1 + 1 SELECT * FROM test LIMIT 100 OFFSET 200 SELECT * FROM test FETCH FIRST ROWS ONLY SELECT * FROM test FETCH FIRST 1 ROWS ONLY @@ -393,6 +397,7 @@ SELECT * EXCEPT (a, b) REPLACE (a AS b, b AS C) SELECT * EXCEPT (a, b) REPLACE (a AS b, b AS C) FROM y SELECT a.* EXCEPT (a, b), b.* REPLACE (a AS b, b AS C) SELECT a.* EXCEPT (a, b), b.* REPLACE (a AS b, b AS C) FROM x +SELECT A.* EXCEPT (A.COL_1) FROM TABLE_1 AS A SELECT zoo, animals FROM (VALUES ('oakland', ARRAY('a', 'b')), ('sf', ARRAY('b', 'c'))) AS t(zoo, animals) SELECT zoo, animals FROM UNNEST(ARRAY(STRUCT('oakland' AS zoo, ARRAY('a', 'b') AS animals), STRUCT('sf' AS zoo, ARRAY('b', 'c') AS animals))) AS t(zoo, animals) WITH a AS (SELECT 1) SELECT 1 UNION ALL SELECT 2 @@ -558,6 +563,7 @@ CREATE FUNCTION f AS 'g' CREATE FUNCTION a(b INT, c VARCHAR) AS 'SELECT 1' CREATE FUNCTION a() LANGUAGE sql CREATE FUNCTION a() LANGUAGE sql RETURNS INT +CREATE FUNCTION a.b(x INT) RETURNS INT AS RETURN x + 1 CREATE FUNCTION a.b.c() CREATE INDEX 
abc ON t (a) CREATE INDEX abc ON t (a, b, b) @@ -585,6 +591,7 @@ INSERT OVERWRITE TABLE a.b PARTITION(ds) SELECT x FROM y INSERT OVERWRITE TABLE a.b PARTITION(ds = 'YYYY-MM-DD') SELECT x FROM y INSERT OVERWRITE TABLE a.b PARTITION(ds, hour) SELECT x FROM y INSERT OVERWRITE TABLE a.b PARTITION(ds = 'YYYY-MM-DD', hour = 'hh') SELECT x FROM y +ALTER SESSION SET STATEMENT_TIMEOUT_IN_SECONDS=3 ALTER AGGREGATE bla(foo) OWNER TO CURRENT_USER ALTER RULE foo ON bla RENAME TO baz ALTER ROLE CURRENT_USER WITH REPLICATION @@ -721,3 +728,6 @@ ALTER TABLE a ADD PRIMARY KEY (x, y) NOT ENFORCED ALTER TABLE a ADD FOREIGN KEY (x, y) REFERENCES bla SELECT end FROM a SELECT id FROM b.a AS a QUALIFY ROW_NUMBER() OVER (PARTITION BY br ORDER BY sadf DESC) = 1 +SELECT LEFT.FOO FROM BLA AS LEFT +SELECT RIGHT.FOO FROM BLA AS RIGHT +SELECT LEFT FROM LEFT LEFT JOIN RIGHT RIGHT JOIN LEFT diff --git a/tests/fixtures/optimizer/expand_laterals.sql b/tests/fixtures/optimizer/expand_laterals.sql new file mode 100644 index 0000000..09bbd0f --- /dev/null +++ b/tests/fixtures/optimizer/expand_laterals.sql @@ -0,0 +1,40 @@ +# title: expand alias reference +SELECT + x.a + 1 AS i, + i + 1 AS j, + j + 1 AS k +FROM x; +SELECT + x.a + 1 AS i, + x.a + 1 + 1 AS j, + x.a + 1 + 1 + 1 AS k +FROM x; + +# title: noop - reference comes before alias +SELECT + b + 1 AS j, + x.a + 1 AS i +FROM x; +SELECT + b + 1 AS j, + x.a + 1 AS i +FROM x; + + +# title: subquery +SELECT + * +FROM ( + SELECT + x.a + 1 AS i, + i + 1 AS j + FROM x +); +SELECT + * +FROM ( + SELECT + x.a + 1 AS i, + x.a + 1 + 1 AS j + FROM x +); diff --git a/tests/fixtures/optimizer/optimizer.sql b/tests/fixtures/optimizer/optimizer.sql index 664b3c7..9c14ec1 100644 --- a/tests/fixtures/optimizer/optimizer.sql +++ b/tests/fixtures/optimizer/optimizer.sql @@ -1,14 +1,20 @@ # title: lateral # execute: false SELECT a, m FROM z LATERAL VIEW EXPLODE([1, 2]) q AS m; +WITH "z_2" AS ( + SELECT + "z"."a" AS "a" + FROM "z" AS "z" +) SELECT "z"."a" AS "a", "q"."m" 
AS "m" -FROM "z" AS "z" +FROM "z_2" AS "z" LATERAL VIEW EXPLODE(ARRAY(1, 2)) q AS "m"; # title: unnest +# execute: false SELECT x FROM UNNEST([1, 2]) AS q(x, y); SELECT "q"."x" AS "x" @@ -369,3 +375,18 @@ SELECT FROM "x" AS "x" RIGHT JOIN "y_2" AS "y" ON "x"."a" = "y"."b"; + + +# title: lateral column alias reference +SELECT x.a + 1 AS c, c + 1 AS d FROM x; +SELECT + "x"."a" + 1 AS "c", + "x"."a" + 2 AS "d" +FROM "x" AS "x"; + +# title: column reference takes priority over lateral column alias reference +SELECT x.a + 1 AS b, b + 1 AS c FROM x; +SELECT + "x"."a" + 1 AS "b", + "x"."b" + 1 AS "c" +FROM "x" AS "x"; diff --git a/tests/fixtures/optimizer/qualify_columns.sql b/tests/fixtures/optimizer/qualify_columns.sql index ee041e2..141f028 100644 --- a/tests/fixtures/optimizer/qualify_columns.sql +++ b/tests/fixtures/optimizer/qualify_columns.sql @@ -104,14 +104,6 @@ SELECT x.a AS a FROM x AS x ORDER BY x.b; SELECT SUM(a) AS a FROM x ORDER BY SUM(a); SELECT SUM(x.a) AS a FROM x AS x ORDER BY SUM(x.a); -# dialect: bigquery -SELECT ROW_NUMBER() OVER (PARTITION BY a ORDER BY b) AS row_num FROM x QUALIFY row_num = 1; -SELECT ROW_NUMBER() OVER (PARTITION BY x.a ORDER BY x.b) AS row_num FROM x AS x QUALIFY row_num = 1; - -# dialect: bigquery -SELECT x.b, x.a FROM x LEFT JOIN y ON x.b = y.b QUALIFY ROW_NUMBER() OVER(PARTITION BY x.b ORDER BY x.a DESC) = 1; -SELECT x.b AS b, x.a AS a FROM x AS x LEFT JOIN y AS y ON x.b = y.b QUALIFY ROW_NUMBER() OVER (PARTITION BY x.b ORDER BY x.a DESC) = 1; - # execute: false SELECT AGGREGATE(ARRAY(a, x.b), 0, (x, acc) -> x + acc + a) AS sum_agg FROM x; SELECT AGGREGATE(ARRAY(x.a, x.b), 0, (x, acc) -> x + acc + x.a) AS sum_agg FROM x AS x; @@ -199,15 +191,6 @@ SELECT x.a AS a FROM x AS x WHERE x.b IN (SELECT x.b AS b FROM y AS x); SELECT a FROM x AS i WHERE b IN (SELECT b FROM y AS j WHERE j.b IN (SELECT c FROM y AS k WHERE k.b = j.b)); SELECT i.a AS a FROM x AS i WHERE i.b IN (SELECT j.b AS b FROM y AS j WHERE j.b IN (SELECT k.c AS c FROM y 
AS k WHERE k.b = j.b)); -# execute: false -# dialect: bigquery -SELECT aa FROM x, UNNEST(a) AS aa; -SELECT aa AS aa FROM x AS x, UNNEST(x.a) AS aa; - -# execute: false -SELECT aa FROM x, UNNEST(a) AS t(aa); -SELECT t.aa AS aa FROM x AS x, UNNEST(x.a) AS t(aa); - -------------------------------------- -- Expand * -------------------------------------- @@ -302,3 +285,43 @@ SELECT COALESCE(x.b, y.b, z.b) AS b FROM x AS x JOIN y AS y ON x.b = y.b JOIN z # dialect: spark SELECT /*+ BROADCAST(y) */ x.b FROM x JOIN y ON x.b = y.b; SELECT /*+ BROADCAST(y) */ x.b AS b FROM x AS x JOIN y AS y ON x.b = y.b; + +-------------------------------------- +-- UDTF +-------------------------------------- +# execute: false +SELECT c FROM x LATERAL VIEW EXPLODE (a) AS c; +SELECT _q_0.c AS c FROM x AS x LATERAL VIEW EXPLODE(x.a) _q_0 AS c; + +# execute: false +SELECT c FROM xx LATERAL VIEW EXPLODE (a) AS c; +SELECT _q_0.c AS c FROM xx AS xx LATERAL VIEW EXPLODE(xx.a) _q_0 AS c; + +# execute: false +SELECT c FROM x LATERAL VIEW EXPLODE (a) t AS c; +SELECT t.c AS c FROM x AS x LATERAL VIEW EXPLODE(x.a) t AS c; + +# execute: false +SELECT aa FROM x, UNNEST(a) AS t(aa); +SELECT t.aa AS aa FROM x AS x, UNNEST(x.a) AS t(aa); + +# execute: false +# dialect: bigquery +SELECT aa FROM x, UNNEST(a) AS aa; +SELECT aa AS aa FROM x AS x, UNNEST(x.a) AS aa; + +-------------------------------------- +-- Window functions +-------------------------------------- + +-- ORDER BY in window function +SELECT a + 1 AS a, ROW_NUMBER() OVER (PARTITION BY b ORDER BY a) AS row_num FROM x; +SELECT x.a + 1 AS a, ROW_NUMBER() OVER (PARTITION BY x.b ORDER BY x.a) AS row_num FROM x AS x; + +# dialect: bigquery +SELECT ROW_NUMBER() OVER (PARTITION BY a ORDER BY b) AS row_num FROM x QUALIFY row_num = 1; +SELECT ROW_NUMBER() OVER (PARTITION BY x.a ORDER BY x.b) AS row_num FROM x AS x QUALIFY row_num = 1; + +# dialect: bigquery +SELECT x.b, x.a FROM x LEFT JOIN y ON x.b = y.b QUALIFY ROW_NUMBER() OVER(PARTITION BY x.b ORDER 
BY x.a DESC) = 1; +SELECT x.b AS b, x.a AS a FROM x AS x LEFT JOIN y AS y ON x.b = y.b QUALIFY ROW_NUMBER() OVER (PARTITION BY x.b ORDER BY x.a DESC) = 1; diff --git a/tests/fixtures/pretty.sql b/tests/fixtures/pretty.sql index a240597..c67ba5d 100644 --- a/tests/fixtures/pretty.sql +++ b/tests/fixtures/pretty.sql @@ -342,3 +342,8 @@ SELECT basket_index FROM table_data CROSS JOIN UNNEST(fruit_basket) AS fruit WITH OFFSET AS basket_index; +SELECT A.* EXCEPT A.COL_1, A.COL_2 FROM TABLE_1 A; +SELECT + A.* + EXCEPT (A.COL_1, A.COL_2) +FROM TABLE_1 AS A; diff --git a/tests/test_expressions.py b/tests/test_expressions.py index 55e07d1..7acc0fa 100644 --- a/tests/test_expressions.py +++ b/tests/test_expressions.py @@ -122,6 +122,10 @@ class TestExpressions(unittest.TestCase): ["first", "second", "third"], ) + self.assertEqual(parse_one("x.*").name, "*") + self.assertEqual(parse_one("NULL").name, "NULL") + self.assertEqual(parse_one("a.b.c").name, "c") + def test_table_name(self): self.assertEqual(exp.table_name(parse_one("a", into=exp.Table)), "a") self.assertEqual(exp.table_name(parse_one("a.b", into=exp.Table)), "a.b") diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 360dfb5..b6993ba 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -163,7 +163,10 @@ class TestOptimizer(unittest.TestCase): for sql in load_sql_fixtures("optimizer/qualify_columns__invalid.sql"): with self.subTest(sql): with self.assertRaises((OptimizeError, SchemaError)): - optimizer.qualify_columns.qualify_columns(parse_one(sql), schema=self.schema) + expression = optimizer.qualify_columns.qualify_columns( + parse_one(sql), schema=self.schema + ) + optimizer.qualify_columns.validate_qualify_columns(expression) def test_lower_identities(self): self.check_file("lower_identities", optimizer.lower_identities.lower_identities) @@ -190,6 +193,14 @@ class TestOptimizer(unittest.TestCase): def test_pushdown_predicates(self): self.check_file("pushdown_predicates", 
optimizer.pushdown_predicates.pushdown_predicates) + def test_expand_laterals(self): + self.check_file( + "expand_laterals", + optimizer.expand_laterals.expand_laterals, + pretty=True, + execute=True, + ) + def test_expand_multi_table_selects(self): self.check_file( "expand_multi_table_selects", @@ -369,6 +380,12 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|') self.assertEqual(expression.right.this.left.type.this, exp.DataType.Type.INT) self.assertEqual(expression.right.this.right.type.this, exp.DataType.Type.INT) + def test_lateral_annotation(self): + expression = optimizer.optimize( + parse_one("SELECT c FROM (select 1 a) as x LATERAL VIEW EXPLODE (a) AS c") + ).expressions[0] + self.assertEqual(expression.type.this, exp.DataType.Type.INT) + def test_derived_tables_column_annotation(self): schema = {"x": {"cola": "INT"}, "y": {"cola": "FLOAT"}} sql = """ -- cgit v1.2.3