author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-01-23 05:06:10 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-01-23 05:06:10 +0000
commit:    258c7df9cab21a4978c100568907ac1cb7fd6ee0 (patch)
tree:      a98c4c9fc7433833be72543de5d99d15b9927442 /sqlglot/optimizer
parent:    Adding upstream version 20.4.0. (diff)
Adding upstream version 20.9.0. (tag: upstream/20.9.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/optimizer')
 sqlglot/optimizer/annotate_types.py       |  24
 sqlglot/optimizer/pushdown_predicates.py  |  50
 sqlglot/optimizer/pushdown_projections.py |   7
 sqlglot/optimizer/qualify.py              |  16
 sqlglot/optimizer/qualify_columns.py      | 170
 sqlglot/optimizer/qualify_tables.py       |   6
 sqlglot/optimizer/scope.py                |  16
 sqlglot/optimizer/simplify.py             |  23
 8 files changed, 228 insertions(+), 84 deletions(-)
diff --git a/sqlglot/optimizer/annotate_types.py b/sqlglot/optimizer/annotate_types.py
index 7b990f1..d0168d5 100644
--- a/sqlglot/optimizer/annotate_types.py
+++ b/sqlglot/optimizer/annotate_types.py
@@ -195,6 +195,9 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
exp.StrPosition,
exp.TsOrDiToDi,
},
+ exp.DataType.Type.JSON: {
+ exp.ParseJSON,
+ },
exp.DataType.Type.TIMESTAMP: {
exp.CurrentTime,
exp.CurrentTimestamp,
@@ -275,6 +278,7 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
exp.Sum: lambda self, e: self._annotate_by_args(e, "this", "expressions", promote=True),
exp.TryCast: lambda self, e: self._annotate_with_type(e, e.args["to"]),
exp.VarMap: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.MAP),
+ exp.Struct: lambda self, e: self._annotate_by_args(e, "expressions", struct=True),
}
NESTED_TYPES = {
@@ -477,7 +481,12 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
@t.no_type_check
def _annotate_by_args(
- self, expression: E, *args: str, promote: bool = False, array: bool = False
+ self,
+ expression: E,
+ *args: str,
+ promote: bool = False,
+ array: bool = False,
+ struct: bool = False,
) -> E:
self._annotate_args(expression)
@@ -506,6 +515,19 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
),
)
+ if struct:
+ expressions = [
+ expr.type
+ if not expr.args.get("alias")
+ else exp.ColumnDef(this=expr.args["alias"].copy(), kind=expr.type)
+ for expr in expressions
+ ]
+
+ self._set_type(
+ expression,
+ exp.DataType(this=exp.DataType.Type.STRUCT, expressions=expressions, nested=True),
+ )
+
return expression
def _annotate_timeunit(
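The new `struct=True` path turns a `STRUCT(...)` call into a nested `STRUCT` data type, wrapping aliased fields in `ColumnDef` nodes. A minimal sketch of the resulting annotation, assuming the BigQuery dialect for the struct literal:

```python
from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

expr = annotate_types(parse_one("SELECT STRUCT(1 AS a, 'x' AS b) AS s", read="bigquery"))
# Aliased fields become ColumnDefs inside the nested type,
# e.g. STRUCT<a INT, b VARCHAR>
print(expr.selects[0].type.sql())
```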
diff --git a/sqlglot/optimizer/pushdown_predicates.py b/sqlglot/optimizer/pushdown_predicates.py
index 10ff13a..12c3b89 100644
--- a/sqlglot/optimizer/pushdown_predicates.py
+++ b/sqlglot/optimizer/pushdown_predicates.py
@@ -30,13 +30,18 @@ def pushdown_predicates(expression, dialect=None):
where = select.args.get("where")
if where:
selected_sources = scope.selected_sources
+ join_index = {
+ join.alias_or_name: i for i, join in enumerate(select.args.get("joins") or [])
+ }
+
# a right join can only push down to itself and not the source FROM table
for k, (node, source) in selected_sources.items():
parent = node.find_ancestor(exp.Join, exp.From)
if isinstance(parent, exp.Join) and parent.side == "RIGHT":
selected_sources = {k: (node, source)}
break
- pushdown(where.this, selected_sources, scope_ref_count, dialect)
+
+ pushdown(where.this, selected_sources, scope_ref_count, dialect, join_index)
# a join should only push down into itself, not into other joins,
# so we limit the selected sources to the join itself
@@ -53,7 +58,7 @@ def pushdown_predicates(expression, dialect=None):
return expression
-def pushdown(condition, sources, scope_ref_count, dialect):
+def pushdown(condition, sources, scope_ref_count, dialect, join_index=None):
if not condition:
return
@@ -67,21 +72,28 @@ def pushdown(condition, sources, scope_ref_count, dialect):
)
if cnf_like:
- pushdown_cnf(predicates, sources, scope_ref_count)
+ pushdown_cnf(predicates, sources, scope_ref_count, join_index=join_index)
else:
pushdown_dnf(predicates, sources, scope_ref_count)
-def pushdown_cnf(predicates, scope, scope_ref_count):
+def pushdown_cnf(predicates, scope, scope_ref_count, join_index=None):
"""
If the predicates are in CNF like form, we can simply replace each block in the parent.
"""
+ join_index = join_index or {}
for predicate in predicates:
for node in nodes_for_predicate(predicate, scope, scope_ref_count).values():
if isinstance(node, exp.Join):
- predicate.replace(exp.true())
- node.on(predicate, copy=False)
- break
+ name = node.alias_or_name
+ predicate_tables = exp.column_table_names(predicate, name)
+
+ # Don't push the predicate if it references tables that appear in later joins
+ this_index = join_index[name]
+ if all(join_index.get(table, -1) < this_index for table in predicate_tables):
+ predicate.replace(exp.true())
+ node.on(predicate, copy=False)
+ break
if isinstance(node, exp.Select):
predicate.replace(exp.true())
inner_predicate = replace_aliases(node, predicate)
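The `join_index` map guards CNF pushdown against join-order violations: a predicate is only attached to a join's `ON` clause if every table it references was introduced at or before that join. A hedged sketch with hypothetical tables `x`, `y`, `z`:

```python
from sqlglot import parse_one
from sqlglot.optimizer.pushdown_predicates import pushdown_predicates

sql = """
SELECT * FROM x
JOIN y ON x.id = y.id
JOIN z ON x.id = z.id
WHERE y.v = z.v
"""
# y.v = z.v mentions z, which joins after y, so it may only land on z's
# ON clause (where both y and z are in scope), never on y's.
print(pushdown_predicates(parse_one(sql)).sql())
```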
@@ -112,9 +124,7 @@ def pushdown_dnf(predicates, scope, scope_ref_count):
conditions = {}
- # for every pushdown table, find all related conditions in all predicates
- # combine them with ORS
- # (a.x AND and a.y AND b.x) OR (a.z AND c.y) -> (a.x AND a.y) OR (a.z)
+ # push down all predicates to their respective nodes
for table in sorted(pushdown_tables):
for predicate in predicates:
nodes = nodes_for_predicate(predicate, scope, scope_ref_count)
@@ -122,23 +132,9 @@ def pushdown_dnf(predicates, scope, scope_ref_count):
if table not in nodes:
continue
- predicate_condition = None
-
- for column in predicate.find_all(exp.Column):
- if column.table == table:
- condition = column.find_ancestor(exp.Condition)
- predicate_condition = (
- exp.and_(predicate_condition, condition)
- if predicate_condition
- else condition
- )
-
- if predicate_condition:
- conditions[table] = (
- exp.or_(conditions[table], predicate_condition)
- if table in conditions
- else predicate_condition
- )
+ conditions[table] = (
+ exp.or_(conditions[table], predicate) if table in conditions else predicate
+ )
for name, node in nodes.items():
if name not in conditions:
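The DNF branch is also simpler: instead of rebuilding per-table conditions column by column, each whole predicate block that can reach a pushdown table is now combined with `OR`. An illustrative, non-authoritative example:

```python
from sqlglot import parse_one
from sqlglot.optimizer.pushdown_predicates import pushdown_predicates

sql = """
SELECT * FROM (SELECT * FROM a) AS x
JOIN b ON x.id = b.id
WHERE (x.u = 1 AND b.v = 2) OR x.u = 3
"""
# Both DNF blocks mention x, so the blocks themselves are OR'ed and pushed
# toward x's subquery, rather than extracting x's columns from each block.
print(pushdown_predicates(parse_one(sql)).sql())
```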
diff --git a/sqlglot/optimizer/pushdown_projections.py b/sqlglot/optimizer/pushdown_projections.py
index 4bc3bd2..e3aaebc 100644
--- a/sqlglot/optimizer/pushdown_projections.py
+++ b/sqlglot/optimizer/pushdown_projections.py
@@ -43,9 +43,8 @@ def pushdown_projections(expression, schema=None, remove_unused_selections=True)
parent_selections = referenced_columns.get(scope, {SELECT_ALL})
alias_count = source_column_alias_count.get(scope, 0)
- if scope.expression.args.get("distinct") or (scope.parent and scope.parent.pivots):
- # We can't remove columns SELECT DISTINCT nor UNION DISTINCT. The same holds if
- # we select from a pivoted source in the parent scope.
+ # We can't remove columns from SELECT DISTINCT nor UNION DISTINCT.
+ if scope.expression.args.get("distinct"):
parent_selections = {SELECT_ALL}
if isinstance(scope.expression, exp.Union):
@@ -78,7 +77,7 @@ def pushdown_projections(expression, schema=None, remove_unused_selections=True)
# Push the selected columns down to the next scope
for name, (node, source) in scope.selected_sources.items():
if isinstance(source, Scope):
- columns = selects.get(name) or set()
+ columns = {SELECT_ALL} if scope.pivots else selects.get(name) or set()
referenced_columns[source].update(columns)
column_aliases = node.alias_column_names
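Two behaviors meet here: `DISTINCT` pins the current scope's projection list, while a pivot in the scope forces `SELECT *` down to its sources (since the pivot may consume any column). A hedged sketch of the `DISTINCT` case, with a made-up schema:

```python
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify
from sqlglot.optimizer.pushdown_projections import pushdown_projections

sql = "SELECT DISTINCT a, b FROM (SELECT a, b, c FROM t) AS sub"
expr = qualify(parse_one(sql), schema={"t": {"a": "int", "b": "int", "c": "int"}})
# DISTINCT pins the outer projection list, but the unused column c can
# still be pruned from the inner subquery.
print(pushdown_projections(expr).sql())
```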
diff --git a/sqlglot/optimizer/qualify.py b/sqlglot/optimizer/qualify.py
index 5fdbde8..8d83b47 100644
--- a/sqlglot/optimizer/qualify.py
+++ b/sqlglot/optimizer/qualify.py
@@ -3,10 +3,11 @@ from __future__ import annotations
import typing as t
from sqlglot import exp
-from sqlglot.dialects.dialect import DialectType
+from sqlglot.dialects.dialect import Dialect, DialectType
from sqlglot.optimizer.isolate_table_selects import isolate_table_selects
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers
from sqlglot.optimizer.qualify_columns import (
+ pushdown_cte_alias_columns as pushdown_cte_alias_columns_func,
qualify_columns as qualify_columns_func,
quote_identifiers as quote_identifiers_func,
validate_qualify_columns as validate_qualify_columns_func,
@@ -22,6 +23,7 @@ def qualify(
catalog: t.Optional[str] = None,
schema: t.Optional[dict | Schema] = None,
expand_alias_refs: bool = True,
+ expand_stars: bool = True,
infer_schema: t.Optional[bool] = None,
isolate_tables: bool = False,
qualify_columns: bool = True,
@@ -47,6 +49,9 @@ def qualify(
catalog: Default catalog name for tables.
schema: Schema to infer column names and types.
expand_alias_refs: Whether or not to expand references to aliases.
+ expand_stars: Whether or not to expand star queries. This is a necessary step
+ for most of the optimizer's rules to work; do not set to False unless you
+ know what you're doing!
infer_schema: Whether or not to infer the schema if missing.
isolate_tables: Whether or not to isolate table selects.
qualify_columns: Whether or not to qualify columns.
@@ -66,9 +71,16 @@ def qualify(
if isolate_tables:
expression = isolate_table_selects(expression, schema=schema)
+ if Dialect.get_or_raise(dialect).PREFER_CTE_ALIAS_COLUMN:
+ expression = pushdown_cte_alias_columns_func(expression)
+
if qualify_columns:
expression = qualify_columns_func(
- expression, schema, expand_alias_refs=expand_alias_refs, infer_schema=infer_schema
+ expression,
+ schema,
+ expand_alias_refs=expand_alias_refs,
+ expand_stars=expand_stars,
+ infer_schema=infer_schema,
)
if quote_identifiers:
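A quick sketch of the new `expand_stars` knob (the schema is hypothetical); all other flags keep their defaults:

```python
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify

expr = qualify(
    parse_one("SELECT * FROM t"),
    schema={"t": {"a": "int", "b": "int"}},
    expand_stars=False,  # keep the star instead of rewriting to t.a, t.b
)
print(expr.sql())  # e.g. SELECT * FROM "t" AS "t"
```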
diff --git a/sqlglot/optimizer/qualify_columns.py b/sqlglot/optimizer/qualify_columns.py
index 742cdf5..a6397ae 100644
--- a/sqlglot/optimizer/qualify_columns.py
+++ b/sqlglot/optimizer/qualify_columns.py
@@ -17,6 +17,7 @@ def qualify_columns(
expression: exp.Expression,
schema: t.Dict | Schema,
expand_alias_refs: bool = True,
+ expand_stars: bool = True,
infer_schema: t.Optional[bool] = None,
) -> exp.Expression:
"""
@@ -33,10 +34,16 @@ def qualify_columns(
expression: Expression to qualify.
schema: Database schema.
expand_alias_refs: Whether or not to expand references to aliases.
+ expand_stars: Whether or not to expand star queries. This is a necessary step
+ for most of the optimizer's rules to work; do not set to False unless you
+ know what you're doing!
infer_schema: Whether or not to infer the schema if missing.
Returns:
The qualified expression.
+
+ Notes:
+ - Currently only handles a single PIVOT or UNPIVOT operator
"""
schema = ensure_schema(schema)
infer_schema = schema.empty if infer_schema is None else infer_schema
@@ -57,7 +64,8 @@ def qualify_columns(
_expand_alias_refs(scope, resolver)
if not isinstance(scope.expression, exp.UDTF):
- _expand_stars(scope, resolver, using_column_tables, pseudocolumns)
+ if expand_stars:
+ _expand_stars(scope, resolver, using_column_tables, pseudocolumns)
qualify_outputs(scope)
_expand_group_by(scope)
@@ -68,21 +76,41 @@ def qualify_columns(
def validate_qualify_columns(expression: E) -> E:
"""Raise an `OptimizeError` if any columns aren't qualified"""
- unqualified_columns = []
+ all_unqualified_columns = []
for scope in traverse_scope(expression):
if isinstance(scope.expression, exp.Select):
- unqualified_columns.extend(scope.unqualified_columns)
+ unqualified_columns = scope.unqualified_columns
+
if scope.external_columns and not scope.is_correlated_subquery and not scope.pivots:
column = scope.external_columns[0]
- raise OptimizeError(
- f"""Column '{column}' could not be resolved{f" for table: '{column.table}'" if column.table else ''}"""
- )
+ for_table = f" for table: '{column.table}'" if column.table else ""
+ raise OptimizeError(f"Column '{column}' could not be resolved{for_table}")
+
+ if unqualified_columns and scope.pivots and scope.pivots[0].unpivot:
+ # New columns produced by the UNPIVOT can't be qualified, but there may be columns
+ # under the UNPIVOT's IN clause that can and should be qualified. We recompute
+ # this list here to ensure those in the former category will be excluded.
+ unpivot_columns = set(_unpivot_columns(scope.pivots[0]))
+ unqualified_columns = [c for c in unqualified_columns if c not in unpivot_columns]
+
+ all_unqualified_columns.extend(unqualified_columns)
+
+ if all_unqualified_columns:
+ raise OptimizeError(f"Ambiguous columns: {all_unqualified_columns}")
- if unqualified_columns:
- raise OptimizeError(f"Ambiguous columns: {unqualified_columns}")
return expression
+def _unpivot_columns(unpivot: exp.Pivot) -> t.Iterator[exp.Column]:
+ name_column = []
+ field = unpivot.args.get("field")
+ if isinstance(field, exp.In) and isinstance(field.this, exp.Column):
+ name_column.append(field.this)
+
+ value_columns = (c for e in unpivot.expressions for c in e.find_all(exp.Column))
+ return itertools.chain(name_column, value_columns)
+
+
def _pop_table_column_aliases(derived_tables: t.List[exp.CTE | exp.Subquery]) -> None:
"""
Remove table column aliases.
@@ -216,6 +244,7 @@ def _expand_alias_refs(scope: Scope, resolver: Resolver) -> None:
replace_columns(expression.args.get("group"), literal_index=True)
replace_columns(expression.args.get("having"), resolve_table=True)
replace_columns(expression.args.get("qualify"), resolve_table=True)
+
scope.clear_cache()
@@ -353,18 +382,25 @@ def _expand_stars(
replace_columns: t.Dict[int, t.Dict[str, str]] = {}
coalesced_columns = set()
- # TODO: handle optimization of multiple PIVOTs (and possibly UNPIVOTs) in the future
- pivot_columns = None
pivot_output_columns = None
- pivot = t.cast(t.Optional[exp.Pivot], seq_get(scope.pivots, 0))
+ pivot_exclude_columns = None
- has_pivoted_source = pivot and not pivot.args.get("unpivot")
- if pivot and has_pivoted_source:
- pivot_columns = set(col.output_name for col in pivot.find_all(exp.Column))
+ pivot = t.cast(t.Optional[exp.Pivot], seq_get(scope.pivots, 0))
+ if isinstance(pivot, exp.Pivot) and not pivot.alias_column_names:
+ if pivot.unpivot:
+ pivot_output_columns = [c.output_name for c in _unpivot_columns(pivot)]
+
+ field = pivot.args.get("field")
+ if isinstance(field, exp.In):
+ pivot_exclude_columns = {
+ c.output_name for e in field.expressions for c in e.find_all(exp.Column)
+ }
+ else:
+ pivot_exclude_columns = set(c.output_name for c in pivot.find_all(exp.Column))
- pivot_output_columns = [col.output_name for col in pivot.args.get("columns", [])]
- if not pivot_output_columns:
- pivot_output_columns = [col.alias_or_name for col in pivot.expressions]
+ pivot_output_columns = [c.output_name for c in pivot.args.get("columns", [])]
+ if not pivot_output_columns:
+ pivot_output_columns = [c.alias_or_name for c in pivot.expressions]
for expression in scope.expression.selects:
if isinstance(expression, exp.Star):
@@ -384,47 +420,54 @@ def _expand_stars(
raise OptimizeError(f"Unknown table: {table}")
columns = resolver.get_source_columns(table, only_visible=True)
+ columns = columns or scope.outer_column_list
if pseudocolumns:
columns = [name for name in columns if name.upper() not in pseudocolumns]
- if columns and "*" not in columns:
- table_id = id(table)
- columns_to_exclude = except_columns.get(table_id) or set()
+ if not columns or "*" in columns:
+ return
+
+ table_id = id(table)
+ columns_to_exclude = except_columns.get(table_id) or set()
- if pivot and has_pivoted_source and pivot_columns and pivot_output_columns:
- implicit_columns = [col for col in columns if col not in pivot_columns]
+ if pivot:
+ if pivot_output_columns and pivot_exclude_columns:
+ pivot_columns = [c for c in columns if c not in pivot_exclude_columns]
+ pivot_columns.extend(pivot_output_columns)
+ else:
+ pivot_columns = pivot.alias_column_names
+
+ if pivot_columns:
new_selections.extend(
exp.alias_(exp.column(name, table=pivot.alias), name, copy=False)
- for name in implicit_columns + pivot_output_columns
+ for name in pivot_columns
if name not in columns_to_exclude
)
continue
- for name in columns:
- if name in using_column_tables and table in using_column_tables[name]:
- if name in coalesced_columns:
- continue
-
- coalesced_columns.add(name)
- tables = using_column_tables[name]
- coalesce = [exp.column(name, table=table) for table in tables]
-
- new_selections.append(
- alias(
- exp.Coalesce(this=coalesce[0], expressions=coalesce[1:]),
- alias=name,
- copy=False,
- )
- )
- elif name not in columns_to_exclude:
- alias_ = replace_columns.get(table_id, {}).get(name, name)
- column = exp.column(name, table=table)
- new_selections.append(
- alias(column, alias_, copy=False) if alias_ != name else column
+ for name in columns:
+ if name in using_column_tables and table in using_column_tables[name]:
+ if name in coalesced_columns:
+ continue
+
+ coalesced_columns.add(name)
+ tables = using_column_tables[name]
+ coalesce = [exp.column(name, table=table) for table in tables]
+
+ new_selections.append(
+ alias(
+ exp.Coalesce(this=coalesce[0], expressions=coalesce[1:]),
+ alias=name,
+ copy=False,
)
- else:
- return
+ )
+ elif name not in columns_to_exclude:
+ alias_ = replace_columns.get(table_id, {}).get(name, name)
+ column = exp.column(name, table=table)
+ new_selections.append(
+ alias(column, alias_, copy=False) if alias_ != name else column
+ )
# Ensures we don't overwrite the initial selections with an empty list
if new_selections:
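With these changes, a star over an `UNPIVOT` expands to the untouched source columns plus the operator's name/value output columns, while the columns consumed by the `IN` clause drop out. A hedged sketch (Snowflake syntax, made-up schema):

```python
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify

sql = "SELECT * FROM t UNPIVOT(val FOR key IN (a, b))"
expr = qualify(
    parse_one(sql, read="snowflake"),
    schema={"t": {"id": "int", "a": "int", "b": "int"}},
    dialect="snowflake",
)
# a and b are consumed by the IN clause; id survives, and key/val are
# the UNPIVOT's outputs.
print(expr.sql(dialect="snowflake"))
```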
@@ -472,6 +515,9 @@ def qualify_outputs(scope_or_expression: Scope | exp.Expression) -> None:
for i, (selection, aliased_column) in enumerate(
itertools.zip_longest(scope.expression.selects, scope.outer_column_list)
):
+ if selection is None:
+ break
+
if isinstance(selection, exp.Subquery):
if not selection.output_name:
selection.set("alias", exp.TableAlias(this=exp.to_identifier(f"_col_{i}")))
@@ -495,6 +541,38 @@ def quote_identifiers(expression: E, dialect: DialectType = None, identify: bool
)
+def pushdown_cte_alias_columns(expression: exp.Expression) -> exp.Expression:
+ """
+ Pushes down the CTE alias columns into the projection.
+
+ This step is useful in Snowflake, where the CTE alias columns can be referenced in the HAVING clause.
+
+ Example:
+ >>> import sqlglot
+ >>> expression = sqlglot.parse_one("WITH y (c) AS (SELECT SUM(a) FROM ( SELECT 1 a ) AS x HAVING c > 0) SELECT c FROM y")
+ >>> pushdown_cte_alias_columns(expression).sql()
+ 'WITH y(c) AS (SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0) SELECT c FROM y'
+
+ Args:
+ expression: Expression to pushdown.
+
+ Returns:
+ The expression with the CTE aliases pushed down into the projection.
+ """
+ for cte in expression.find_all(exp.CTE):
+ if cte.alias_column_names:
+ new_expressions = []
+ for _alias, projection in zip(cte.alias_column_names, cte.this.expressions):
+ if isinstance(projection, exp.Alias):
+ projection.set("alias", _alias)
+ else:
+ projection = alias(projection, alias=_alias)
+ new_expressions.append(projection)
+ cte.this.set("expressions", new_expressions)
+
+ return expression
+
+
class Resolver:
"""
Helper for resolving columns.
diff --git a/sqlglot/optimizer/qualify_tables.py b/sqlglot/optimizer/qualify_tables.py
index 57ecabe..e0fe641 100644
--- a/sqlglot/optimizer/qualify_tables.py
+++ b/sqlglot/optimizer/qualify_tables.py
@@ -72,11 +72,15 @@ def qualify_tables(
if not source.args.get("catalog") and source.args.get("db"):
source.set("catalog", catalog)
+ pivots = source.args.get("pivots")
if not source.alias:
+ # Don't add the pivot's alias to the pivoted table; use the table's name instead
+ if pivots and pivots[0].alias == name:
+ name = source.name
+
# Mutates the source by attaching an alias to it
alias(source, name or source.name or next_alias_name(), copy=False, table=True)
- pivots = source.args.get("pivots")
if pivots and not pivots[0].alias:
pivots[0].set(
"alias", exp.TableAlias(this=exp.to_identifier(next_alias_name()))
diff --git a/sqlglot/optimizer/scope.py b/sqlglot/optimizer/scope.py
index d34857d..a3f08d5 100644
--- a/sqlglot/optimizer/scope.py
+++ b/sqlglot/optimizer/scope.py
@@ -539,11 +539,23 @@ def _traverse_union(scope):
# The last scope to be yielded should be the topmost scope
left = None
- for left in _traverse_scope(scope.branch(scope.expression.left, scope_type=ScopeType.UNION)):
+ for left in _traverse_scope(
+ scope.branch(
+ scope.expression.left,
+ outer_column_list=scope.outer_column_list,
+ scope_type=ScopeType.UNION,
+ )
+ ):
yield left
right = None
- for right in _traverse_scope(scope.branch(scope.expression.right, scope_type=ScopeType.UNION)):
+ for right in _traverse_scope(
+ scope.branch(
+ scope.expression.right,
+ outer_column_list=scope.outer_column_list,
+ scope_type=ScopeType.UNION,
+ )
+ ):
yield right
scope.union_scopes = [left, right]
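Propagating `outer_column_list` into both branches means outer column aliases (e.g. from a CTE's column list) can now be applied on each side of a `UNION`. A hedged sketch:

```python
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify

sql = "WITH c(x) AS (SELECT 1 UNION ALL SELECT 2) SELECT x FROM c"
# Both branches pick up the outer alias, roughly:
#   WITH "c" AS (SELECT 1 AS "x" UNION ALL SELECT 2 AS "x") SELECT ... FROM "c" AS "c"
print(qualify(parse_one(sql)).sql())
```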
diff --git a/sqlglot/optimizer/simplify.py b/sqlglot/optimizer/simplify.py
index f53023c..25d4e75 100644
--- a/sqlglot/optimizer/simplify.py
+++ b/sqlglot/optimizer/simplify.py
@@ -100,6 +100,7 @@ def simplify(
node = simplify_parens(node)
node = simplify_datetrunc(node, dialect)
node = sort_comparison(node)
+ node = simplify_startswith(node)
if root:
expression.replace(node)
@@ -776,6 +777,26 @@ def simplify_conditionals(expression):
return expression
+def simplify_startswith(expression: exp.Expression) -> exp.Expression:
+ """
+ Reduces a prefix check to either TRUE or FALSE if both the string and the
+ prefix are statically known.
+
+ Example:
+ >>> from sqlglot import parse_one
+ >>> simplify_startswith(parse_one("STARTSWITH('foo', 'f')")).sql()
+ 'TRUE'
+ """
+ if (
+ isinstance(expression, exp.StartsWith)
+ and expression.this.is_string
+ and expression.expression.is_string
+ ):
+ return exp.convert(expression.name.startswith(expression.expression.name))
+
+ return expression
+
+
DateRange = t.Tuple[datetime.date, datetime.date]
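Since the rule runs inside `simplify`'s main loop, a statically-true prefix check also vanishes from larger predicates. A hedged sketch:

```python
from sqlglot import parse_one
from sqlglot.optimizer.simplify import simplify

expr = parse_one("SELECT * FROM t WHERE STARTSWITH('foobar', 'foo') AND x > 1")
print(simplify(expr).sql())  # e.g. SELECT * FROM t WHERE x > 1
```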
@@ -1160,7 +1181,7 @@ def gen(expression: t.Any) -> str:
GEN_MAP = {
exp.Add: lambda e: _binary(e, "+"),
exp.And: lambda e: _binary(e, "AND"),
- exp.Anonymous: lambda e: f"{e.this} {','.join(gen(e) for e in e.expressions)}",
+ exp.Anonymous: lambda e: f"{e.this.upper()} {','.join(gen(e) for e in e.expressions)}",
exp.Between: lambda e: f"{gen(e.this)} BETWEEN {gen(e.args.get('low'))} AND {gen(e.args.get('high'))}",
exp.Boolean: lambda e: "TRUE" if e.this else "FALSE",
exp.Bracket: lambda e: f"{gen(e.this)}[{gen(e.expressions)}]",
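Uppercasing the anonymous function name makes `gen`'s canonical strings case-insensitive for unknown functions, so the simplifier treats `foo(x)` and `FOO(x)` as the same expression. A small sketch:

```python
from sqlglot import exp
from sqlglot.optimizer.simplify import gen

lower = exp.Anonymous(this="foo", expressions=[exp.column("x")])
upper = exp.Anonymous(this="FOO", expressions=[exp.column("x")])
assert gen(lower) == gen(upper)
```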