
sqlglot.dialects.hive
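
The dialect defined below is consumed through sqlglot's top-level API rather than instantiated directly. As a minimal sketch (the query and table name are made-up examples, not taken from this module), Hive SQL can be read and rewritten for another engine by passing the dialect name:

    import sqlglot

    # Read Hive syntax (GET_JSON_OBJECT, backtick identifiers) and emit the
    # equivalent SQL for another dialect; the exact output depends on the
    # target dialect's own transforms.
    sql = "SELECT GET_JSON_OBJECT(payload, '$.user') FROM `events`"
    print(sqlglot.transpile(sql, read="hive", write="presto")[0])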

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot.dialects.dialect import (
  7    Dialect,
  8    approx_count_distinct_sql,
  9    create_with_partitions_sql,
 10    format_time_lambda,
 11    if_sql,
 12    locate_to_strposition,
 13    max_or_greatest,
 14    min_or_least,
 15    no_ilike_sql,
 16    no_recursive_cte_sql,
 17    no_safe_divide_sql,
 18    no_trycast_sql,
 19    rename_func,
 20    strposition_to_locate_sql,
 21    struct_extract_sql,
 22    timestrtotime_sql,
 23    var_map_sql,
 24)
 25from sqlglot.helper import seq_get
 26from sqlglot.parser import parse_var_map
 27from sqlglot.tokens import TokenType
 28
 29# (FuncType, Multiplier)
 30DATE_DELTA_INTERVAL = {
 31    "YEAR": ("ADD_MONTHS", 12),
 32    "MONTH": ("ADD_MONTHS", 1),
 33    "QUARTER": ("ADD_MONTHS", 3),
 34    "WEEK": ("DATE_ADD", 7),
 35    "DAY": ("DATE_ADD", 1),
 36}
 37
 38TIME_DIFF_FACTOR = {
 39    "MILLISECOND": " * 1000",
 40    "SECOND": "",
 41    "MINUTE": " / 60",
 42    "HOUR": " / 3600",
 43}
 44
 45DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")
 46
 47
 48def _add_date_sql(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:
 49    unit = expression.text("unit").upper()
 50    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))
 51
 52    if isinstance(expression, exp.DateSub):
 53        multiplier *= -1
 54
 55    if expression.expression.is_number:
 56        modified_increment = exp.Literal.number(int(expression.text("expression")) * multiplier)
 57    else:
 58        modified_increment = expression.expression
 59        if multiplier != 1:
 60            modified_increment = exp.Mul(  # type: ignore
 61                this=modified_increment, expression=exp.Literal.number(multiplier)
 62            )
 63
 64    return self.func(func, expression.this, modified_increment)
 65
 66
 67def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str:
 68    unit = expression.text("unit").upper()
 69
 70    factor = TIME_DIFF_FACTOR.get(unit)
 71    if factor is not None:
 72        left = self.sql(expression, "this")
 73        right = self.sql(expression, "expression")
 74        sec_diff = f"UNIX_TIMESTAMP({left}) - UNIX_TIMESTAMP({right})"
 75        return f"({sec_diff}){factor}" if factor else sec_diff
 76
 77    sql_func = "MONTHS_BETWEEN" if unit in DIFF_MONTH_SWITCH else "DATEDIFF"
 78    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
 79    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
 80    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"
 81    return f"{diff_sql}{multiplier_sql}"
 82
 83
 84def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str:
 85    this = expression.this
 86
 87    if not this.type:
 88        from sqlglot.optimizer.annotate_types import annotate_types
 89
 90        annotate_types(this)
 91
 92    if this.type.is_type(exp.DataType.Type.JSON):
 93        return self.sql(this)
 94    return self.func("TO_JSON", this, expression.args.get("options"))
 95
 96
 97def _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:
 98    if expression.expression:
 99        self.unsupported("Hive SORT_ARRAY does not support a comparator")
100    return f"SORT_ARRAY({self.sql(expression, 'this')})"
101
102
103def _property_sql(self: generator.Generator, expression: exp.Property) -> str:
104    return f"'{expression.name}'={self.sql(expression, 'value')}"
105
106
107def _str_to_unix_sql(self: generator.Generator, expression: exp.StrToUnix) -> str:
108    return self.func("UNIX_TIMESTAMP", expression.this, _time_format(self, expression))
109
110
111def _str_to_date_sql(self: generator.Generator, expression: exp.StrToDate) -> str:
112    this = self.sql(expression, "this")
113    time_format = self.format_time(expression)
114    if time_format not in (Hive.time_format, Hive.date_format):
115        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
116    return f"CAST({this} AS DATE)"
117
118
119def _str_to_time_sql(self: generator.Generator, expression: exp.StrToTime) -> str:
120    this = self.sql(expression, "this")
121    time_format = self.format_time(expression)
122    if time_format not in (Hive.time_format, Hive.date_format):
123        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
124    return f"CAST({this} AS TIMESTAMP)"
125
126
127def _time_format(
128    self: generator.Generator, expression: exp.UnixToStr | exp.StrToUnix
129) -> t.Optional[str]:
130    time_format = self.format_time(expression)
131    if time_format == Hive.time_format:
132        return None
133    return time_format
134
135
136def _time_to_str(self: generator.Generator, expression: exp.TimeToStr) -> str:
137    this = self.sql(expression, "this")
138    time_format = self.format_time(expression)
139    return f"DATE_FORMAT({this}, {time_format})"
140
141
142def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
143    this = self.sql(expression, "this")
144    time_format = self.format_time(expression)
145    if time_format and time_format not in (Hive.time_format, Hive.date_format):
146        return f"TO_DATE({this}, {time_format})"
147    return f"TO_DATE({this})"
148
149
150class Hive(Dialect):
151    alias_post_tablesample = True
152
153    time_mapping = {
154        "y": "%Y",
155        "Y": "%Y",
156        "YYYY": "%Y",
157        "yyyy": "%Y",
158        "YY": "%y",
159        "yy": "%y",
160        "MMMM": "%B",
161        "MMM": "%b",
162        "MM": "%m",
163        "M": "%-m",
164        "dd": "%d",
165        "d": "%-d",
166        "HH": "%H",
167        "H": "%-H",
168        "hh": "%I",
169        "h": "%-I",
170        "mm": "%M",
171        "m": "%-M",
172        "ss": "%S",
173        "s": "%-S",
174        "SSSSSS": "%f",
175        "a": "%p",
176        "DD": "%j",
177        "D": "%-j",
178        "E": "%a",
179        "EE": "%a",
180        "EEE": "%a",
181        "EEEE": "%A",
182    }
183
184    date_format = "'yyyy-MM-dd'"
185    dateint_format = "'yyyyMMdd'"
186    time_format = "'yyyy-MM-dd HH:mm:ss'"
187
188    class Tokenizer(tokens.Tokenizer):
189        QUOTES = ["'", '"']
190        IDENTIFIERS = ["`"]
191        STRING_ESCAPES = ["\\"]
192        ENCODE = "utf-8"
193        IDENTIFIER_CAN_START_WITH_DIGIT = True
194
195        KEYWORDS = {
196            **tokens.Tokenizer.KEYWORDS,
197            "ADD ARCHIVE": TokenType.COMMAND,
198            "ADD ARCHIVES": TokenType.COMMAND,
199            "ADD FILE": TokenType.COMMAND,
200            "ADD FILES": TokenType.COMMAND,
201            "ADD JAR": TokenType.COMMAND,
202            "ADD JARS": TokenType.COMMAND,
203            "MSCK REPAIR": TokenType.COMMAND,
204            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
205        }
206
207        NUMERIC_LITERALS = {
208            "L": "BIGINT",
209            "S": "SMALLINT",
210            "Y": "TINYINT",
211            "D": "DOUBLE",
212            "F": "FLOAT",
213            "BD": "DECIMAL",
214        }
215
216    class Parser(parser.Parser):
217        LOG_DEFAULTS_TO_LN = True
218        STRICT_CAST = False
219
220        FUNCTIONS = {
221            **parser.Parser.FUNCTIONS,
222            "BASE64": exp.ToBase64.from_arg_list,
223            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
224            "DATE_ADD": lambda args: exp.TsOrDsAdd(
225                this=seq_get(args, 0),
226                expression=seq_get(args, 1),
227                unit=exp.Literal.string("DAY"),
228            ),
229            "DATEDIFF": lambda args: exp.DateDiff(
230                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
231                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
232            ),
233            "DATE_SUB": lambda args: exp.TsOrDsAdd(
234                this=seq_get(args, 0),
235                expression=exp.Mul(
236                    this=seq_get(args, 1),
237                    expression=exp.Literal.number(-1),
238                ),
239                unit=exp.Literal.string("DAY"),
240            ),
241            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
242                [
243                    exp.TimeStrToTime(this=seq_get(args, 0)),
244                    seq_get(args, 1),
245                ]
246            ),
247            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
248            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
249            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
250            "LOCATE": locate_to_strposition,
251            "MAP": parse_var_map,
252            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
253            "PERCENTILE": exp.Quantile.from_arg_list,
254            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
255            "COLLECT_SET": exp.SetAgg.from_arg_list,
256            "SIZE": exp.ArraySize.from_arg_list,
257            "SPLIT": exp.RegexpSplit.from_arg_list,
258            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
259            "TO_JSON": exp.JSONFormat.from_arg_list,
260            "UNBASE64": exp.FromBase64.from_arg_list,
261            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
262            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
263        }
264
265        PROPERTY_PARSERS = {
266            **parser.Parser.PROPERTY_PARSERS,
267            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
268                expressions=self._parse_wrapped_csv(self._parse_property)
269            ),
270        }
271
272        QUERY_MODIFIER_PARSERS = {
273            **parser.Parser.QUERY_MODIFIER_PARSERS,
274            "distribute": lambda self: self._parse_sort(exp.Distribute, "DISTRIBUTE", "BY"),
275            "sort": lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
276            "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
277        }
278
279    class Generator(generator.Generator):
280        LIMIT_FETCH = "LIMIT"
281        TABLESAMPLE_WITH_METHOD = False
282        TABLESAMPLE_SIZE_IS_PERCENT = True
283        JOIN_HINTS = False
284        TABLE_HINTS = False
285        INDEX_ON = "ON TABLE"
286
287        TYPE_MAPPING = {
288            **generator.Generator.TYPE_MAPPING,
289            exp.DataType.Type.TEXT: "STRING",
290            exp.DataType.Type.DATETIME: "TIMESTAMP",
291            exp.DataType.Type.VARBINARY: "BINARY",
292            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
293            exp.DataType.Type.BIT: "BOOLEAN",
294        }
295
296        TRANSFORMS = {
297            **generator.Generator.TRANSFORMS,
298            exp.Group: transforms.preprocess([transforms.unalias_group]),
299            exp.Select: transforms.preprocess(
300                [
301                    transforms.eliminate_qualify,
302                    transforms.eliminate_distinct_on,
303                    transforms.unnest_to_explode,
304                ]
305            ),
306            exp.Property: _property_sql,
307            exp.ApproxDistinct: approx_count_distinct_sql,
308            exp.ArrayConcat: rename_func("CONCAT"),
309            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
310            exp.ArraySize: rename_func("SIZE"),
311            exp.ArraySort: _array_sort_sql,
312            exp.With: no_recursive_cte_sql,
313            exp.DateAdd: _add_date_sql,
314            exp.DateDiff: _date_diff_sql,
315            exp.DateStrToDate: rename_func("TO_DATE"),
316            exp.DateSub: _add_date_sql,
317            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
318            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
319            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
320            exp.FromBase64: rename_func("UNBASE64"),
321            exp.If: if_sql,
322            exp.ILike: no_ilike_sql,
323            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
324            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
325            exp.JSONFormat: _json_format_sql,
326            exp.Map: var_map_sql,
327            exp.Max: max_or_greatest,
328            exp.Min: min_or_least,
329            exp.VarMap: var_map_sql,
330            exp.Create: create_with_partitions_sql,
331            exp.Quantile: rename_func("PERCENTILE"),
332            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
333            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
334            exp.RegexpSplit: rename_func("SPLIT"),
335            exp.SafeDivide: no_safe_divide_sql,
336            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
337            exp.SetAgg: rename_func("COLLECT_SET"),
338            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
339            exp.StrPosition: strposition_to_locate_sql,
340            exp.StrToDate: _str_to_date_sql,
341            exp.StrToTime: _str_to_time_sql,
342            exp.StrToUnix: _str_to_unix_sql,
343            exp.StructExtract: struct_extract_sql,
344            exp.TimeStrToDate: rename_func("TO_DATE"),
345            exp.TimeStrToTime: timestrtotime_sql,
346            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
347            exp.TimeToStr: _time_to_str,
348            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
349            exp.ToBase64: rename_func("BASE64"),
350            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
351            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
352            exp.TsOrDsToDate: _to_date_sql,
353            exp.TryCast: no_trycast_sql,
354            exp.UnixToStr: lambda self, e: self.func(
355                "FROM_UNIXTIME", e.this, _time_format(self, e)
356            ),
357            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
358            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
359            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
360            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
361            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
362            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
363            exp.LastDateOfMonth: rename_func("LAST_DAY"),
364            exp.National: lambda self, e: self.national_sql(e, prefix=""),
365        }
366
367        PROPERTIES_LOCATION = {
368            **generator.Generator.PROPERTIES_LOCATION,
369            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
370            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
371            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
372        }
373
374        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
375            return self.func(
376                "COLLECT_LIST",
377                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
378            )
379
380        def with_properties(self, properties: exp.Properties) -> str:
381            return self.properties(
382                properties,
383                prefix=self.seg("TBLPROPERTIES"),
384            )
385
386        def datatype_sql(self, expression: exp.DataType) -> str:
387            if (
388                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
389                and not expression.expressions
390            ):
391                expression = exp.DataType.build("text")
392            elif expression.this in exp.DataType.TEMPORAL_TYPES:
393                expression = exp.DataType.build(expression.this)
394
395            return super().datatype_sql(expression)
396
397        def after_having_modifiers(self, expression: exp.Expression) -> t.List[str]:
398            return super().after_having_modifiers(expression) + [
399                self.sql(expression, "distribute"),
400                self.sql(expression, "sort"),
401                self.sql(expression, "cluster"),
402            ]
class Hive(sqlglot.dialects.dialect.Dialect):
class Hive.Tokenizer(sqlglot.tokens.Tokenizer):
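
Hive.Tokenizer carries the lexer-level settings from the source above: backtick identifier delimiters, backslash string escapes, and the single-letter numeric-literal suffixes (L, S, Y, D, F, BD). A minimal sketch of exercising it through the public API (the statement is illustrative):

    import sqlglot

    # Backticks are Hive's identifier delimiters, so they are recognised on
    # input and used again when the tree is rendered back as Hive SQL.
    ast = sqlglot.parse_one("SELECT `col` FROM `db`.`tbl`", read="hive")
    print(ast.sql(dialect="hive"))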
class Hive.Parser(sqlglot.parser.Parser):

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: the desired error level. Default: ErrorLevel.RAISE
  • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs. ARRAY[1] as the head of a list. Default: 0
  • alias_post_tablesample: Whether the table alias comes after TABLESAMPLE. Default: False
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
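
In practice Hive.Parser is selected by dialect name rather than constructed by hand. A minimal sketch of parsing Hive SQL into an expression tree (the query is a made-up example; SIZE maps to exp.ArraySize per the FUNCTIONS mapping in the source):

    import sqlglot
    from sqlglot import exp

    # parse_one routes through Hive.Parser when read="hive" and returns an
    # exp.Expression; SIZE(...) is parsed into an exp.ArraySize node.
    ast = sqlglot.parse_one("SELECT SIZE(tags) FROM events", read="hive")
    print(isinstance(ast, exp.Select))          # True
    print(ast.find(exp.ArraySize) is not None)  # True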
class Hive.Generator(sqlglot.generator.Generator):

Generator interprets the given syntax tree and produces a SQL string as an output.

Arguments:
  • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
  • time_trie (trie): a trie of the time_mapping keys
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
  • raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
  • identify (bool | str): 'always': always quote identifiers; 'safe': only quote identifiers that contain no uppercase characters; True is equivalent to 'always'.
  • normalize (bool): if set to True, all identifiers will be lowercased
  • string_escape (str): specifies a string escape character. Default: '.
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • pad (int): determines padding in a formatted string. Default: 2.
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • unnest_column_only (bool): if set to True, UNNEST table aliases are treated only as column aliases
  • normalize_functions (str): how to normalize function names, "upper", "lower", or None. Default: "upper"
  • alias_post_tablesample (bool): whether the table alias comes after TABLESAMPLE. Default: False
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
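
Generation is likewise driven by the dialect name. A small sketch of producing Hive SQL from a tree parsed with another dialect (the query is illustrative; the TEXT-to-STRING rewrite comes from TYPE_MAPPING in the source above):

    import sqlglot

    # Parse with one dialect, generate with Hive; exp.DataType.Type.TEXT is
    # mapped to STRING by Hive.Generator.TYPE_MAPPING.
    ast = sqlglot.parse_one("SELECT CAST(x AS TEXT) FROM t", read="postgres")
    print(ast.sql(dialect="hive", pretty=True))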
def arrayagg_sql(self, expression: sqlglot.expressions.ArrayAgg) -> str:
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
def after_having_modifiers(self, expression: sqlglot.expressions.Expression) -> List[str]:
Inherited Members
sqlglot.generator.Generator
Generator
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypesize_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
after_limit_modifiers
select_sql
schema_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
concat_sql
check_sql
foreignkey_sql
primarykey_sql
unique_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql