
sqlglot.dialects.hive
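A minimal usage sketch (illustrative, not part of the module source): the Hive dialect is exercised through sqlglot's top-level API, with the Parser/Generator tables defined below driving how Hive-specific functions are parsed and regenerated. The table and column names are made up, and the exact generated SQL can vary between sqlglot versions.

import sqlglot

# Parse Hive SQL into an expression tree and render it back in the Hive dialect.
ast = sqlglot.parse_one("SELECT COLLECT_LIST(x) FROM t GROUP BY y", read="hive")
print(ast.sql(dialect="hive"))

# Transpile Hive SQL to another dialect; Hive-specific functions such as SIZE and
# GET_JSON_OBJECT are resolved through the FUNCTIONS table in Hive.Parser below.
print(sqlglot.transpile("SELECT SIZE(xs), GET_JSON_OBJECT(j, '$.a') FROM t", read="hive", write="spark")[0])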

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot.dialects.dialect import (
  7    DATE_ADD_OR_SUB,
  8    Dialect,
  9    NormalizationStrategy,
 10    approx_count_distinct_sql,
 11    arg_max_or_min_no_count,
 12    create_with_partitions_sql,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    if_sql,
 16    is_parse_json,
 17    left_to_substring_sql,
 18    locate_to_strposition,
 19    max_or_greatest,
 20    min_or_least,
 21    no_ilike_sql,
 22    no_recursive_cte_sql,
 23    no_safe_divide_sql,
 24    no_trycast_sql,
 25    regexp_extract_sql,
 26    regexp_replace_sql,
 27    rename_func,
 28    right_to_substring_sql,
 29    strposition_to_locate_sql,
 30    struct_extract_sql,
 31    time_format,
 32    timestrtotime_sql,
 33    var_map_sql,
 34)
 35from sqlglot.helper import seq_get
 36from sqlglot.parser import parse_var_map
 37from sqlglot.tokens import TokenType
 38
 39# (FuncType, Multiplier)
 40DATE_DELTA_INTERVAL = {
 41    "YEAR": ("ADD_MONTHS", 12),
 42    "MONTH": ("ADD_MONTHS", 1),
 43    "QUARTER": ("ADD_MONTHS", 3),
 44    "WEEK": ("DATE_ADD", 7),
 45    "DAY": ("DATE_ADD", 1),
 46}
 47
 48TIME_DIFF_FACTOR = {
 49    "MILLISECOND": " * 1000",
 50    "SECOND": "",
 51    "MINUTE": " / 60",
 52    "HOUR": " / 3600",
 53}
 54
 55DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")
 56
 57
 58def _create_sql(self, expression: exp.Create) -> str:
 59    # remove UNIQUE column constraints
 60    for constraint in expression.find_all(exp.UniqueColumnConstraint):
 61        if constraint.parent:
 62            constraint.parent.pop()
 63
 64    properties = expression.args.get("properties")
 65    temporary = any(
 66        isinstance(prop, exp.TemporaryProperty)
 67        for prop in (properties.expressions if properties else [])
 68    )
 69
 70    # CTAS with temp tables map to CREATE TEMPORARY VIEW
 71    kind = expression.args["kind"]
 72    if kind.upper() == "TABLE" and temporary:
 73        if expression.expression:
 74            return f"CREATE TEMPORARY VIEW {self.sql(expression, 'this')} AS {self.sql(expression, 'expression')}"
 75        else:
 76            # CREATE TEMPORARY TABLE may require storage provider
 77            expression = self.temporary_storage_provider(expression)
 78
 79    return create_with_partitions_sql(self, expression)
 80
 81
 82def _add_date_sql(self: Hive.Generator, expression: DATE_ADD_OR_SUB) -> str:
 83    if isinstance(expression, exp.TsOrDsAdd) and not expression.unit:
 84        return self.func("DATE_ADD", expression.this, expression.expression)
 85
 86    unit = expression.text("unit").upper()
 87    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))
 88
 89    if isinstance(expression, exp.DateSub):
 90        multiplier *= -1
 91
 92    if expression.expression.is_number:
 93        modified_increment = exp.Literal.number(int(expression.text("expression")) * multiplier)
 94    else:
 95        modified_increment = expression.expression
 96        if multiplier != 1:
 97            modified_increment = exp.Mul(  # type: ignore
 98                this=modified_increment, expression=exp.Literal.number(multiplier)
 99            )
100
101    return self.func(func, expression.this, modified_increment)
102
103
104def _date_diff_sql(self: Hive.Generator, expression: exp.DateDiff | exp.TsOrDsDiff) -> str:
105    unit = expression.text("unit").upper()
106
107    factor = TIME_DIFF_FACTOR.get(unit)
108    if factor is not None:
109        left = self.sql(expression, "this")
110        right = self.sql(expression, "expression")
111        sec_diff = f"UNIX_TIMESTAMP({left}) - UNIX_TIMESTAMP({right})"
112        return f"({sec_diff}){factor}" if factor else sec_diff
113
114    months_between = unit in DIFF_MONTH_SWITCH
115    sql_func = "MONTHS_BETWEEN" if months_between else "DATEDIFF"
116    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
117    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
118    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"
119
120    if months_between or multiplier_sql:
121        # MONTHS_BETWEEN returns a float, so we need to truncate the fractional part.
122        # For the same reason, we want to truncate if there's a divisor present.
123        diff_sql = f"CAST({diff_sql}{multiplier_sql} AS INT)"
124
125    return diff_sql
126
127
128def _json_format_sql(self: Hive.Generator, expression: exp.JSONFormat) -> str:
129    this = expression.this
130
131    if is_parse_json(this):
132        if this.this.is_string:
133            # Since FROM_JSON requires a nested type, we always wrap the json string with
134            # an array to ensure that "naked" strings like "'a'" will be handled correctly
135            wrapped_json = exp.Literal.string(f"[{this.this.name}]")
136
137            from_json = self.func(
138                "FROM_JSON", wrapped_json, self.func("SCHEMA_OF_JSON", wrapped_json)
139            )
140            to_json = self.func("TO_JSON", from_json)
141
142            # This strips the [, ] delimiters of the dummy array printed by TO_JSON
143            return self.func("REGEXP_EXTRACT", to_json, "'^.(.*).$'", "1")
144        return self.sql(this)
145
146    return self.func("TO_JSON", this, expression.args.get("options"))
147
148
149def _array_sort_sql(self: Hive.Generator, expression: exp.ArraySort) -> str:
150    if expression.expression:
151        self.unsupported("Hive SORT_ARRAY does not support a comparator")
152    return f"SORT_ARRAY({self.sql(expression, 'this')})"
153
154
155def _property_sql(self: Hive.Generator, expression: exp.Property) -> str:
156    return f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"
157
158
159def _str_to_unix_sql(self: Hive.Generator, expression: exp.StrToUnix) -> str:
160    return self.func("UNIX_TIMESTAMP", expression.this, time_format("hive")(self, expression))
161
162
163def _str_to_date_sql(self: Hive.Generator, expression: exp.StrToDate) -> str:
164    this = self.sql(expression, "this")
165    time_format = self.format_time(expression)
166    if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
167        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
168    return f"CAST({this} AS DATE)"
169
170
171def _str_to_time_sql(self: Hive.Generator, expression: exp.StrToTime) -> str:
172    this = self.sql(expression, "this")
173    time_format = self.format_time(expression)
174    if time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
175        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
176    return f"CAST({this} AS TIMESTAMP)"
177
178
179def _time_to_str(self: Hive.Generator, expression: exp.TimeToStr) -> str:
180    this = self.sql(expression, "this")
181    time_format = self.format_time(expression)
182    return f"DATE_FORMAT({this}, {time_format})"
183
184
185def _to_date_sql(self: Hive.Generator, expression: exp.TsOrDsToDate) -> str:
186    this = self.sql(expression, "this")
187    time_format = self.format_time(expression)
188    if time_format and time_format not in (Hive.TIME_FORMAT, Hive.DATE_FORMAT):
189        return f"TO_DATE({this}, {time_format})"
190    if isinstance(expression.this, exp.TsOrDsToDate):
191        return this
192    return f"TO_DATE({this})"
193
194
195class Hive(Dialect):
196    ALIAS_POST_TABLESAMPLE = True
197    IDENTIFIERS_CAN_START_WITH_DIGIT = True
198    SUPPORTS_USER_DEFINED_TYPES = False
199    SAFE_DIVISION = True
200
201    # https://spark.apache.org/docs/latest/sql-ref-identifier.html#description
202    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
203
204    TIME_MAPPING = {
205        "y": "%Y",
206        "Y": "%Y",
207        "YYYY": "%Y",
208        "yyyy": "%Y",
209        "YY": "%y",
210        "yy": "%y",
211        "MMMM": "%B",
212        "MMM": "%b",
213        "MM": "%m",
214        "M": "%-m",
215        "dd": "%d",
216        "d": "%-d",
217        "HH": "%H",
218        "H": "%-H",
219        "hh": "%I",
220        "h": "%-I",
221        "mm": "%M",
222        "m": "%-M",
223        "ss": "%S",
224        "s": "%-S",
225        "SSSSSS": "%f",
226        "a": "%p",
227        "DD": "%j",
228        "D": "%-j",
229        "E": "%a",
230        "EE": "%a",
231        "EEE": "%a",
232        "EEEE": "%A",
233    }
234
235    DATE_FORMAT = "'yyyy-MM-dd'"
236    DATEINT_FORMAT = "'yyyyMMdd'"
237    TIME_FORMAT = "'yyyy-MM-dd HH:mm:ss'"
238
239    class Tokenizer(tokens.Tokenizer):
240        QUOTES = ["'", '"']
241        IDENTIFIERS = ["`"]
242        STRING_ESCAPES = ["\\"]
243        ENCODE = "utf-8"
244
245        SINGLE_TOKENS = {
246            **tokens.Tokenizer.SINGLE_TOKENS,
247            "$": TokenType.PARAMETER,
248        }
249
250        KEYWORDS = {
251            **tokens.Tokenizer.KEYWORDS,
252            "ADD ARCHIVE": TokenType.COMMAND,
253            "ADD ARCHIVES": TokenType.COMMAND,
254            "ADD FILE": TokenType.COMMAND,
255            "ADD FILES": TokenType.COMMAND,
256            "ADD JAR": TokenType.COMMAND,
257            "ADD JARS": TokenType.COMMAND,
258            "MSCK REPAIR": TokenType.COMMAND,
259            "REFRESH": TokenType.REFRESH,
260            "TIMESTAMP AS OF": TokenType.TIMESTAMP_SNAPSHOT,
261            "VERSION AS OF": TokenType.VERSION_SNAPSHOT,
262            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
263        }
264
265        NUMERIC_LITERALS = {
266            "L": "BIGINT",
267            "S": "SMALLINT",
268            "Y": "TINYINT",
269            "D": "DOUBLE",
270            "F": "FLOAT",
271            "BD": "DECIMAL",
272        }
273
274    class Parser(parser.Parser):
275        LOG_DEFAULTS_TO_LN = True
276        STRICT_CAST = False
277
278        FUNCTIONS = {
279            **parser.Parser.FUNCTIONS,
280            "BASE64": exp.ToBase64.from_arg_list,
281            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
282            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
283            "DATE_ADD": lambda args: exp.TsOrDsAdd(
284                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
285            ),
286            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
287                [
288                    exp.TimeStrToTime(this=seq_get(args, 0)),
289                    seq_get(args, 1),
290                ]
291            ),
292            "DATE_SUB": lambda args: exp.TsOrDsAdd(
293                this=seq_get(args, 0),
294                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
295                unit=exp.Literal.string("DAY"),
296            ),
297            "DATEDIFF": lambda args: exp.DateDiff(
298                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
299                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
300            ),
301            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
302            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
303            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
304            "LOCATE": locate_to_strposition,
305            "MAP": parse_var_map,
306            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
307            "PERCENTILE": exp.Quantile.from_arg_list,
308            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
309            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
310                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
311            ),
312            "SIZE": exp.ArraySize.from_arg_list,
313            "SPLIT": exp.RegexpSplit.from_arg_list,
314            "STR_TO_MAP": lambda args: exp.StrToMap(
315                this=seq_get(args, 0),
316                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
317                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
318            ),
319            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
320            "TO_JSON": exp.JSONFormat.from_arg_list,
321            "UNBASE64": exp.FromBase64.from_arg_list,
322            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
323            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
324        }
325
326        NO_PAREN_FUNCTION_PARSERS = {
327            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
328            "TRANSFORM": lambda self: self._parse_transform(),
329        }
330
331        PROPERTY_PARSERS = {
332            **parser.Parser.PROPERTY_PARSERS,
333            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
334                expressions=self._parse_wrapped_csv(self._parse_property)
335            ),
336        }
337
338        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
339            if not self._match(TokenType.L_PAREN, advance=False):
340                self._retreat(self._index - 1)
341                return None
342
343            args = self._parse_wrapped_csv(self._parse_lambda)
344            row_format_before = self._parse_row_format(match_row=True)
345
346            record_writer = None
347            if self._match_text_seq("RECORDWRITER"):
348                record_writer = self._parse_string()
349
350            if not self._match(TokenType.USING):
351                return exp.Transform.from_arg_list(args)
352
353            command_script = self._parse_string()
354
355            self._match(TokenType.ALIAS)
356            schema = self._parse_schema()
357
358            row_format_after = self._parse_row_format(match_row=True)
359            record_reader = None
360            if self._match_text_seq("RECORDREADER"):
361                record_reader = self._parse_string()
362
363            return self.expression(
364                exp.QueryTransform,
365                expressions=args,
366                command_script=command_script,
367                schema=schema,
368                row_format_before=row_format_before,
369                record_writer=record_writer,
370                row_format_after=row_format_after,
371                record_reader=record_reader,
372            )
373
374        def _parse_types(
375            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
376        ) -> t.Optional[exp.Expression]:
377            """
378            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
379            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
380
381                spark-sql (default)> select cast(1234 as varchar(2));
382                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
383                char/varchar type and simply treats them as string type. Please use string type
384                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
385                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
386
387                1234
388                Time taken: 4.265 seconds, Fetched 1 row(s)
389
390            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
391            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
392
393            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
394            """
395            this = super()._parse_types(
396                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
397            )
398
399            if this and not schema:
400                return this.transform(
401                    lambda node: node.replace(exp.DataType.build("text"))
402                    if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
403                    else node,
404                    copy=False,
405                )
406
407            return this
408
409        def _parse_partition_and_order(
410            self,
411        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
412            return (
413                self._parse_csv(self._parse_conjunction)
414                if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
415                else [],
416                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
417            )
418
419    class Generator(generator.Generator):
420        LIMIT_FETCH = "LIMIT"
421        TABLESAMPLE_WITH_METHOD = False
422        TABLESAMPLE_SIZE_IS_PERCENT = True
423        JOIN_HINTS = False
424        TABLE_HINTS = False
425        QUERY_HINTS = False
426        INDEX_ON = "ON TABLE"
427        EXTRACT_ALLOWS_QUOTES = False
428        NVL2_SUPPORTED = False
429
430        EXPRESSIONS_WITHOUT_NESTED_CTES = {
431            exp.Insert,
432            exp.Select,
433            exp.Subquery,
434            exp.Union,
435        }
436
437        TYPE_MAPPING = {
438            **generator.Generator.TYPE_MAPPING,
439            exp.DataType.Type.BIT: "BOOLEAN",
440            exp.DataType.Type.DATETIME: "TIMESTAMP",
441            exp.DataType.Type.TEXT: "STRING",
442            exp.DataType.Type.TIME: "TIMESTAMP",
443            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
444            exp.DataType.Type.VARBINARY: "BINARY",
445        }
446
447        TRANSFORMS = {
448            **generator.Generator.TRANSFORMS,
449            exp.Group: transforms.preprocess([transforms.unalias_group]),
450            exp.Select: transforms.preprocess(
451                [
452                    transforms.eliminate_qualify,
453                    transforms.eliminate_distinct_on,
454                    transforms.unnest_to_explode,
455                ]
456            ),
457            exp.Property: _property_sql,
458            exp.AnyValue: rename_func("FIRST"),
459            exp.ApproxDistinct: approx_count_distinct_sql,
460            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
461            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
462            exp.ArrayConcat: rename_func("CONCAT"),
463            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
464            exp.ArraySize: rename_func("SIZE"),
465            exp.ArraySort: _array_sort_sql,
466            exp.With: no_recursive_cte_sql,
467            exp.DateAdd: _add_date_sql,
468            exp.DateDiff: _date_diff_sql,
469            exp.DateStrToDate: datestrtodate_sql,
470            exp.DateSub: _add_date_sql,
471            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
472            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
473            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
474            exp.FromBase64: rename_func("UNBASE64"),
475            exp.If: if_sql(),
476            exp.ILike: no_ilike_sql,
477            exp.IsNan: rename_func("ISNAN"),
478            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
479            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
480            exp.JSONFormat: _json_format_sql,
481            exp.Left: left_to_substring_sql,
482            exp.Map: var_map_sql,
483            exp.Max: max_or_greatest,
484            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
485            exp.Min: min_or_least,
486            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
487            exp.NotNullColumnConstraint: lambda self, e: ""
488            if e.args.get("allow_null")
489            else "NOT NULL",
490            exp.VarMap: var_map_sql,
491            exp.Create: _create_sql,
492            exp.Quantile: rename_func("PERCENTILE"),
493            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
494            exp.RegexpExtract: regexp_extract_sql,
495            exp.RegexpReplace: regexp_replace_sql,
496            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
497            exp.RegexpSplit: rename_func("SPLIT"),
498            exp.Right: right_to_substring_sql,
499            exp.SafeDivide: no_safe_divide_sql,
500            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
501            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
502            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
503            exp.StrPosition: strposition_to_locate_sql,
504            exp.StrToDate: _str_to_date_sql,
505            exp.StrToTime: _str_to_time_sql,
506            exp.StrToUnix: _str_to_unix_sql,
507            exp.StructExtract: struct_extract_sql,
508            exp.TimeStrToDate: rename_func("TO_DATE"),
509            exp.TimeStrToTime: timestrtotime_sql,
510            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
511            exp.TimeToStr: _time_to_str,
512            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
513            exp.ToBase64: rename_func("BASE64"),
514            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
515            exp.TsOrDsAdd: _add_date_sql,
516            exp.TsOrDsDiff: _date_diff_sql,
517            exp.TsOrDsToDate: _to_date_sql,
518            exp.TryCast: no_trycast_sql,
519            exp.UnixToStr: lambda self, e: self.func(
520                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
521            ),
522            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
523            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
524            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
525            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
526            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
527            exp.LastDateOfMonth: rename_func("LAST_DAY"),
528            exp.National: lambda self, e: self.national_sql(e, prefix=""),
529            exp.ClusteredColumnConstraint: lambda self, e: f"({self.expressions(e, 'this', indent=False)})",
530            exp.NonClusteredColumnConstraint: lambda self, e: f"({self.expressions(e, 'this', indent=False)})",
531            exp.NotForReplicationColumnConstraint: lambda self, e: "",
532            exp.OnProperty: lambda self, e: "",
533            exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY",
534        }
535
536        PROPERTIES_LOCATION = {
537            **generator.Generator.PROPERTIES_LOCATION,
538            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
539            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
540            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
541            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
542        }
543
544        def temporary_storage_provider(self, expression: exp.Create) -> exp.Create:
545            # Hive has no temporary storage provider (there are hive settings though)
546            return expression
547
548        def parameter_sql(self, expression: exp.Parameter) -> str:
549            this = self.sql(expression, "this")
550            expression_sql = self.sql(expression, "expression")
551
552            parent = expression.parent
553            this = f"{this}:{expression_sql}" if expression_sql else this
554
555            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
556                # We need to produce SET key = value instead of SET ${key} = value
557                return this
558
559            return f"${{{this}}}"
560
561        def schema_sql(self, expression: exp.Schema) -> str:
562            for ordered in expression.find_all(exp.Ordered):
563                if ordered.args.get("desc") is False:
564                    ordered.set("desc", None)
565
566            return super().schema_sql(expression)
567
568        def constraint_sql(self, expression: exp.Constraint) -> str:
569            for prop in list(expression.find_all(exp.Properties)):
570                prop.pop()
571
572            this = self.sql(expression, "this")
573            expressions = self.expressions(expression, sep=" ", flat=True)
574            return f"CONSTRAINT {this} {expressions}"
575
576        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
577            serde_props = self.sql(expression, "serde_properties")
578            serde_props = f" {serde_props}" if serde_props else ""
579            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
580
581        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
582            return self.func(
583                "COLLECT_LIST",
584                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
585            )
586
587        def with_properties(self, properties: exp.Properties) -> str:
588            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
589
590        def datatype_sql(self, expression: exp.DataType) -> str:
591            if (
592                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
593                and not expression.expressions
594            ):
595                expression = exp.DataType.build("text")
596            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
597                expression.set("this", exp.DataType.Type.VARCHAR)
598            elif expression.this in exp.DataType.TEMPORAL_TYPES:
599                expression = exp.DataType.build(expression.this)
600            elif expression.is_type("float"):
601                size_expression = expression.find(exp.DataTypeParam)
602                if size_expression:
603                    size = int(size_expression.name)
604                    expression = (
605                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
606                    )
607
608            return super().datatype_sql(expression)
609
610        def version_sql(self, expression: exp.Version) -> str:
611            sql = super().version_sql(expression)
612            return sql.replace("FOR ", "", 1)
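The date arithmetic above can be exercised end to end with a short, hedged sketch: _add_date_sql scales the increment by the multiplier from DATE_DELTA_INTERVAL (e.g. 7 for WEEK), and _date_diff_sql falls back to UNIX_TIMESTAMP arithmetic with the divisor from TIME_DIFF_FACTOR for sub-day units. The inputs below are written in other dialects purely for illustration, and the exact generated SQL may differ between sqlglot versions.

import sqlglot

# WEEK has no direct Hive unit, so the generator emits DATE_ADD with the
# increment scaled by the multiplier from DATE_DELTA_INTERVAL (7 for WEEK).
print(sqlglot.transpile("SELECT DATE_ADD(d, INTERVAL 2 WEEK)", read="mysql", write="hive")[0])

# HOUR differences are expressed as a UNIX_TIMESTAMP subtraction with the
# divisor from TIME_DIFF_FACTOR (" / 3600" for HOUR).
print(sqlglot.transpile("SELECT DATEDIFF(HOUR, start_ts, end_ts)", read="tsql", write="hive")[0])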
NUMERIC_LITERALS = {'L': 'BIGINT', 'S': 'SMALLINT', 'Y': 'TINYINT', 'D': 'DOUBLE', 'F': 'FLOAT', 'BD': 'DECIMAL'}
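The suffix table above drives Hive's typed numeric literals (for example 10L for BIGINT or 2S for SMALLINT). A minimal usage sketch, assuming the suffix is folded into a cast to the mapped type:

    import sqlglot

    # Per NUMERIC_LITERALS above, the L suffix maps to BIGINT and S to SMALLINT,
    # so each suffixed literal is expected to come back as a cast to that type.
    tree = sqlglot.parse_one("SELECT 10L, 2S", read="hive")
    for select in tree.selects:
        print(select)  # expected: CAST(10 AS BIGINT), CAST(2 AS SMALLINT)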
class Hive.Parser(sqlglot.parser.Parser):
275    class Parser(parser.Parser):
276        LOG_DEFAULTS_TO_LN = True
277        STRICT_CAST = False
278
279        FUNCTIONS = {
280            **parser.Parser.FUNCTIONS,
281            "BASE64": exp.ToBase64.from_arg_list,
282            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
283            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
284            "DATE_ADD": lambda args: exp.TsOrDsAdd(
285                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
286            ),
287            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
288                [
289                    exp.TimeStrToTime(this=seq_get(args, 0)),
290                    seq_get(args, 1),
291                ]
292            ),
293            "DATE_SUB": lambda args: exp.TsOrDsAdd(
294                this=seq_get(args, 0),
295                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
296                unit=exp.Literal.string("DAY"),
297            ),
298            "DATEDIFF": lambda args: exp.DateDiff(
299                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
300                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
301            ),
302            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
303            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
304            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
305            "LOCATE": locate_to_strposition,
306            "MAP": parse_var_map,
307            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
308            "PERCENTILE": exp.Quantile.from_arg_list,
309            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
310            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
311                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
312            ),
313            "SIZE": exp.ArraySize.from_arg_list,
314            "SPLIT": exp.RegexpSplit.from_arg_list,
315            "STR_TO_MAP": lambda args: exp.StrToMap(
316                this=seq_get(args, 0),
317                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
318                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
319            ),
320            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
321            "TO_JSON": exp.JSONFormat.from_arg_list,
322            "UNBASE64": exp.FromBase64.from_arg_list,
323            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
324            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
325        }
326
327        NO_PAREN_FUNCTION_PARSERS = {
328            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
329            "TRANSFORM": lambda self: self._parse_transform(),
330        }
331
332        PROPERTY_PARSERS = {
333            **parser.Parser.PROPERTY_PARSERS,
334            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
335                expressions=self._parse_wrapped_csv(self._parse_property)
336            ),
337        }
338
339        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
340            if not self._match(TokenType.L_PAREN, advance=False):
341                self._retreat(self._index - 1)
342                return None
343
344            args = self._parse_wrapped_csv(self._parse_lambda)
345            row_format_before = self._parse_row_format(match_row=True)
346
347            record_writer = None
348            if self._match_text_seq("RECORDWRITER"):
349                record_writer = self._parse_string()
350
351            if not self._match(TokenType.USING):
352                return exp.Transform.from_arg_list(args)
353
354            command_script = self._parse_string()
355
356            self._match(TokenType.ALIAS)
357            schema = self._parse_schema()
358
359            row_format_after = self._parse_row_format(match_row=True)
360            record_reader = None
361            if self._match_text_seq("RECORDREADER"):
362                record_reader = self._parse_string()
363
364            return self.expression(
365                exp.QueryTransform,
366                expressions=args,
367                command_script=command_script,
368                schema=schema,
369                row_format_before=row_format_before,
370                record_writer=record_writer,
371                row_format_after=row_format_after,
372                record_reader=record_reader,
373            )
374
375        def _parse_types(
376            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
377        ) -> t.Optional[exp.Expression]:
378            """
379            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
380            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
381
382                spark-sql (default)> select cast(1234 as varchar(2));
383                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
384                char/varchar type and simply treats them as string type. Please use string type
385                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
386                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
387
388                1234
389                Time taken: 4.265 seconds, Fetched 1 row(s)
390
391            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
392            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
393
394            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
395            """
396            this = super()._parse_types(
397                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
398            )
399
400            if this and not schema:
401                return this.transform(
402                    lambda node: node.replace(exp.DataType.build("text"))
403                    if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
404                    else node,
405                    copy=False,
406                )
407
408            return this
409
410        def _parse_partition_and_order(
411            self,
412        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
413            return (
414                self._parse_csv(self._parse_conjunction)
415                if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
416                else [],
417                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
418            )

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
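A short usage sketch of the dialect-specific FUNCTIONS entries above, assuming the standard sqlglot.parse_one entry point: Hive's two-argument DATE_ADD is registered to build exp.TsOrDsAdd with a DAY unit, and COLLECT_LIST maps to exp.ArrayAgg.

    import sqlglot
    from sqlglot import exp

    # DATE_ADD(start, days) is parsed into exp.TsOrDsAdd with unit 'DAY'
    # (see the FUNCTIONS mapping above).
    add = sqlglot.parse_one("SELECT DATE_ADD('2020-01-01', 3)", read="hive").find(exp.TsOrDsAdd)
    assert add is not None and add.text("unit") == "DAY"

    # COLLECT_LIST is parsed into the generic aggregate expression exp.ArrayAgg.
    agg = sqlglot.parse_one("SELECT COLLECT_LIST(x) FROM t", read="hive").find(exp.ArrayAgg)
    assert agg is not None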
LOG_DEFAULTS_TO_LN = True
STRICT_CAST = False
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'CONCAT': <function parse_concat>, 
'CONCAT_WS': <function parse_concat_ws>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <function Hive.Parser.<lambda>>, 'DATEDIFF': <function Hive.Parser.<lambda>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function Hive.Parser.<lambda>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateTrunc'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <function Hive.Parser.<lambda>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function parse_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <function parse_var_map>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <function Hive.Parser.<lambda>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function Hive.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SORT_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <function Hive.Parser.<lambda>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <function Hive.Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'COLLECT_LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'COLLECT_SET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'DATE_FORMAT': <function Hive.Parser.<lambda>>, 'FROM_UNIXTIME': <function format_time_lambda.<locals>._format_time>, 'GET_JSON_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'LOCATE': <function locate_to_strposition>, 'PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'PERCENTILE_APPROX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'TO_DATE': <function format_time_lambda.<locals>._format_time>, 'TO_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'UNBASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'UNIX_TIMESTAMP': <function format_time_lambda.<locals>._format_time>}
NO_PAREN_FUNCTION_PARSERS = {'ANY': <function Parser.<lambda>>, 'CASE': <function Parser.<lambda>>, 'IF': <function Parser.<lambda>>, 'NEXT': <function Parser.<lambda>>, 'TRANSFORM': <function Hive.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'WITH SERDEPROPERTIES': <function Hive.Parser.<lambda>>}
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
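To make the _parse_types note above concrete, a hedged sketch of the expected behavior: CHAR/VARCHAR lengths are dropped outside of schema definitions (the type becomes STRING), while column definitions keep them; Hive's TRANSFORM ... USING clause parses into exp.QueryTransform.

    import sqlglot
    from sqlglot import exp

    # Outside a schema, VARCHAR(length) is replaced with STRING (length dropped).
    print(sqlglot.transpile("SELECT CAST(1234 AS VARCHAR(2))", read="hive", write="hive")[0])
    # expected: SELECT CAST(1234 AS STRING)

    # Inside a schema definition the length is expected to survive the round trip.
    print(sqlglot.transpile("CREATE TABLE t (c VARCHAR(2))", read="hive", write="hive")[0])

    # TRANSFORM ... USING is handled by _parse_transform and yields exp.QueryTransform.
    tree = sqlglot.parse_one(
        "SELECT TRANSFORM(a, b) USING 'cat' AS (x STRING, y STRING) FROM t", read="hive"
    )
    assert tree.find(exp.QueryTransform) is not None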
class Hive.Generator(sqlglot.generator.Generator):
420    class Generator(generator.Generator):
421        LIMIT_FETCH = "LIMIT"
422        TABLESAMPLE_WITH_METHOD = False
423        TABLESAMPLE_SIZE_IS_PERCENT = True
424        JOIN_HINTS = False
425        TABLE_HINTS = False
426        QUERY_HINTS = False
427        INDEX_ON = "ON TABLE"
428        EXTRACT_ALLOWS_QUOTES = False
429        NVL2_SUPPORTED = False
430
431        EXPRESSIONS_WITHOUT_NESTED_CTES = {
432            exp.Insert,
433            exp.Select,
434            exp.Subquery,
435            exp.Union,
436        }
437
438        TYPE_MAPPING = {
439            **generator.Generator.TYPE_MAPPING,
440            exp.DataType.Type.BIT: "BOOLEAN",
441            exp.DataType.Type.DATETIME: "TIMESTAMP",
442            exp.DataType.Type.TEXT: "STRING",
443            exp.DataType.Type.TIME: "TIMESTAMP",
444            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
445            exp.DataType.Type.VARBINARY: "BINARY",
446        }
447
448        TRANSFORMS = {
449            **generator.Generator.TRANSFORMS,
450            exp.Group: transforms.preprocess([transforms.unalias_group]),
451            exp.Select: transforms.preprocess(
452                [
453                    transforms.eliminate_qualify,
454                    transforms.eliminate_distinct_on,
455                    transforms.unnest_to_explode,
456                ]
457            ),
458            exp.Property: _property_sql,
459            exp.AnyValue: rename_func("FIRST"),
460            exp.ApproxDistinct: approx_count_distinct_sql,
461            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
462            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
463            exp.ArrayConcat: rename_func("CONCAT"),
464            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
465            exp.ArraySize: rename_func("SIZE"),
466            exp.ArraySort: _array_sort_sql,
467            exp.With: no_recursive_cte_sql,
468            exp.DateAdd: _add_date_sql,
469            exp.DateDiff: _date_diff_sql,
470            exp.DateStrToDate: datestrtodate_sql,
471            exp.DateSub: _add_date_sql,
472            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
473            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
474            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
475            exp.FromBase64: rename_func("UNBASE64"),
476            exp.If: if_sql(),
477            exp.ILike: no_ilike_sql,
478            exp.IsNan: rename_func("ISNAN"),
479            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
480            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
481            exp.JSONFormat: _json_format_sql,
482            exp.Left: left_to_substring_sql,
483            exp.Map: var_map_sql,
484            exp.Max: max_or_greatest,
485            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
486            exp.Min: min_or_least,
487            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
488            exp.NotNullColumnConstraint: lambda self, e: ""
489            if e.args.get("allow_null")
490            else "NOT NULL",
491            exp.VarMap: var_map_sql,
492            exp.Create: _create_sql,
493            exp.Quantile: rename_func("PERCENTILE"),
494            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
495            exp.RegexpExtract: regexp_extract_sql,
496            exp.RegexpReplace: regexp_replace_sql,
497            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
498            exp.RegexpSplit: rename_func("SPLIT"),
499            exp.Right: right_to_substring_sql,
500            exp.SafeDivide: no_safe_divide_sql,
501            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
502            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
503            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
504            exp.StrPosition: strposition_to_locate_sql,
505            exp.StrToDate: _str_to_date_sql,
506            exp.StrToTime: _str_to_time_sql,
507            exp.StrToUnix: _str_to_unix_sql,
508            exp.StructExtract: struct_extract_sql,
509            exp.TimeStrToDate: rename_func("TO_DATE"),
510            exp.TimeStrToTime: timestrtotime_sql,
511            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
512            exp.TimeToStr: _time_to_str,
513            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
514            exp.ToBase64: rename_func("BASE64"),
515            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
516            exp.TsOrDsAdd: _add_date_sql,
517            exp.TsOrDsDiff: _date_diff_sql,
518            exp.TsOrDsToDate: _to_date_sql,
519            exp.TryCast: no_trycast_sql,
520            exp.UnixToStr: lambda self, e: self.func(
521                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
522            ),
523            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
524            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
525            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
526            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
527            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
528            exp.LastDateOfMonth: rename_func("LAST_DAY"),
529            exp.National: lambda self, e: self.national_sql(e, prefix=""),
530            exp.ClusteredColumnConstraint: lambda self, e: f"({self.expressions(e, 'this', indent=False)})",
531            exp.NonClusteredColumnConstraint: lambda self, e: f"({self.expressions(e, 'this', indent=False)})",
532            exp.NotForReplicationColumnConstraint: lambda self, e: "",
533            exp.OnProperty: lambda self, e: "",
534            exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY",
535        }
536
537        PROPERTIES_LOCATION = {
538            **generator.Generator.PROPERTIES_LOCATION,
539            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
540            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
541            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
542            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
543        }
544
545        def temporary_storage_provider(self, expression: exp.Create) -> exp.Create:
546            # Hive has no temporary storage provider (there are hive settings though)
547            return expression
548
549        def parameter_sql(self, expression: exp.Parameter) -> str:
550            this = self.sql(expression, "this")
551            expression_sql = self.sql(expression, "expression")
552
553            parent = expression.parent
554            this = f"{this}:{expression_sql}" if expression_sql else this
555
556            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
557                # We need to produce SET key = value instead of SET ${key} = value
558                return this
559
560            return f"${{{this}}}"
561
562        def schema_sql(self, expression: exp.Schema) -> str:
563            for ordered in expression.find_all(exp.Ordered):
564                if ordered.args.get("desc") is False:
565                    ordered.set("desc", None)
566
567            return super().schema_sql(expression)
568
569        def constraint_sql(self, expression: exp.Constraint) -> str:
570            for prop in list(expression.find_all(exp.Properties)):
571                prop.pop()
572
573            this = self.sql(expression, "this")
574            expressions = self.expressions(expression, sep=" ", flat=True)
575            return f"CONSTRAINT {this} {expressions}"
576
577        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
578            serde_props = self.sql(expression, "serde_properties")
579            serde_props = f" {serde_props}" if serde_props else ""
580            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
581
582        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
583            return self.func(
584                "COLLECT_LIST",
585                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
586            )
587
588        def with_properties(self, properties: exp.Properties) -> str:
589            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
590
591        def datatype_sql(self, expression: exp.DataType) -> str:
592            if (
593                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
594                and not expression.expressions
595            ):
596                expression = exp.DataType.build("text")
597            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
598                expression.set("this", exp.DataType.Type.VARCHAR)
599            elif expression.this in exp.DataType.TEMPORAL_TYPES:
600                expression = exp.DataType.build(expression.this)
601            elif expression.is_type("float"):
602                size_expression = expression.find(exp.DataTypeParam)
603                if size_expression:
604                    size = int(size_expression.name)
605                    expression = (
606                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
607                    )
608
609            return super().datatype_sql(expression)
610
611        def version_sql(self, expression: exp.Version) -> str:
612            sql = super().version_sql(expression)
613            return sql.replace("FOR ", "", 1)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
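A short, hedged generation sketch based on the TRANSFORMS entries above: exp.ArraySize renders as SIZE, exp.ToBase64 as BASE64, and ILIKE (which Hive lacks) is rewritten via LOWER plus LIKE.

    from sqlglot import exp, transpile

    # rename_func entries: ArraySize -> SIZE, ToBase64 -> BASE64.
    print(exp.ArraySize(this=exp.column("xs")).sql(dialect="hive"))  # expected: SIZE(xs)
    print(exp.ToBase64(this=exp.column("s")).sql(dialect="hive"))    # expected: BASE64(s)

    # no_ilike_sql: ILIKE is expected to become a LIKE over a lowered left-hand side.
    print(transpile("SELECT * FROM t WHERE name ILIKE '%foo%'", read="duckdb", write="hive")[0])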
LIMIT_FETCH = 'LIMIT'
TABLESAMPLE_WITH_METHOD = False
TABLESAMPLE_SIZE_IS_PERCENT = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
INDEX_ON = 'ON TABLE'
EXTRACT_ALLOWS_QUOTES = False
NVL2_SUPPORTED = False
EXPRESSIONS_WITHOUT_NESTED_CTES = {<class 'sqlglot.expressions.Select'>, <class 'sqlglot.expressions.Insert'>, <class 'sqlglot.expressions.Union'>, <class 'sqlglot.expressions.Subquery'>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIT: 'BIT'>: 'BOOLEAN', <Type.DATETIME: 'DATETIME'>: 'TIMESTAMP', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIME: 'TIME'>: 'TIMESTAMP', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.VARBINARY: 'VARBINARY'>: 'BINARY'}
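A hedged illustration of the TYPE_MAPPING above combined with Hive.Generator.datatype_sql: TEXT renders as STRING, DATETIME as TIMESTAMP, and a FLOAT whose precision exceeds 32 is promoted to DOUBLE.

    from sqlglot import exp

    # TYPE_MAPPING: TEXT -> STRING, DATETIME -> TIMESTAMP.
    print(exp.DataType.build("text").sql(dialect="hive"))      # expected: STRING
    print(exp.DataType.build("datetime").sql(dialect="hive"))  # expected: TIMESTAMP

    # datatype_sql: FLOAT(size) keeps FLOAT for size <= 32, otherwise becomes DOUBLE.
    print(exp.DataType.build("float(24)").sql(dialect="hive"))  # expected: FLOAT
    print(exp.DataType.build("float(53)").sql(dialect="hive"))  # expected: DOUBLE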
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _add_date_sql>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function var_map_sql>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Group'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Property'>: <function _property_sql>, <class 'sqlglot.expressions.AnyValue'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function approx_count_distinct_sql>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySort'>: <function _array_sort_sql>, <class 'sqlglot.expressions.With'>: <function no_recursive_cte_sql>, <class 'sqlglot.expressions.DateDiff'>: <function _date_diff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _add_date_sql>, <class 'sqlglot.expressions.DateToDi'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.DiToDate'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.FileFormatProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.FromBase64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IsNan'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function _json_format_sql>, <class 'sqlglot.expressions.Left'>: <function left_to_substring_sql>, <class 'sqlglot.expressions.Map'>: <function var_map_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5Digest'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.MonthsBetween'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NotNullColumnConstraint'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.Quantile'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxQuantile'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function regexp_extract_sql>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpSplit'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Right'>: <function right_to_substring_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function no_safe_divide_sql>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayUniqueAgg'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Split'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function strposition_to_locate_sql>, <class 'sqlglot.expressions.StrToDate'>: <function _str_to_date_sql>, <class 'sqlglot.expressions.StrToTime'>: <function _str_to_time_sql>, <class 'sqlglot.expressions.StrToUnix'>: <function _str_to_unix_sql>, <class 'sqlglot.expressions.StructExtract'>: <function struct_extract_sql>, <class 'sqlglot.expressions.TimeStrToDate'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeStrToUnix'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToStr'>: <function _time_to_str>, <class 'sqlglot.expressions.TimeToUnix'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToBase64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDiToDi'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _add_date_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function _date_diff_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function _to_date_sql>, <class 'sqlglot.expressions.TryCast'>: <function no_trycast_sql>, <class 'sqlglot.expressions.UnixToStr'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTimeStr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.SerdeProperties'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.NumberToStr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LastDateOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.National'>: <function Hive.Generator.<lambda>>, <class 'sqlglot.expressions.PrimaryKeyColumnConstraint'>: <function Hive.Generator.<lambda>>}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 
'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
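The TRANSFORMS entries above are what the generator consults when rewriting expressions for Hive. As a hedged illustration (the exact output strings are assumptions, not taken from the table itself), two of the mappings can be exercised through the public transpile API:

from sqlglot import transpile

# exp.ILike -> no_ilike_sql: Hive has no ILIKE, so the left operand is lowercased.
print(transpile("SELECT x ILIKE 'a%' FROM t", write="hive")[0])
# e.g. SELECT LOWER(x) LIKE 'a%' FROM t

# exp.ApproxDistinct -> approx_count_distinct_sql: renamed to APPROX_COUNT_DISTINCT.
print(transpile("SELECT APPROX_DISTINCT(x) FROM t", read="presto", write="hive")[0])
# e.g. SELECT APPROX_COUNT_DISTINCT(x) FROM t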
def temporary_storage_provider(self, expression: sqlglot.expressions.Create) -> sqlglot.expressions.Create:
545        def temporary_storage_provider(self, expression: exp.Create) -> exp.Create:
546            # Hive has no temporary storage provider (there are hive settings though)
547            return expression
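This hook is intentionally a no-op for Hive; subdialects (Spark, for example) can override it when a temporary table needs an explicit storage provider. A minimal sketch, assuming the round-tripped string below:

from sqlglot import transpile

# Hive passes a non-CTAS CREATE TEMPORARY TABLE through unchanged.
print(transpile("CREATE TEMPORARY TABLE t (a INT)", read="hive", write="hive")[0])
# e.g. CREATE TEMPORARY TABLE t (a INT)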
def parameter_sql(self, expression: sqlglot.expressions.Parameter) -> str:
549        def parameter_sql(self, expression: exp.Parameter) -> str:
550            this = self.sql(expression, "this")
551            expression_sql = self.sql(expression, "expression")
552
553            parent = expression.parent
554            this = f"{this}:{expression_sql}" if expression_sql else this
555
556            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
557                # We need to produce SET key = value instead of SET ${key} = value
558                return this
559
560            return f"${{{this}}}"
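A hedged sketch of the two branches, built directly from expression nodes (both the constructed trees and the expected strings are assumptions):

from sqlglot import exp
from sqlglot.dialects.hive import Hive

hive = Hive()

# A bare parameter gets the ${...} wrapper.
print(hive.generate(exp.Parameter(this=exp.Var(this="x"))))  # expected: ${x}

# Inside SET key = value the wrapper is dropped so the key stays plain.
item = exp.SetItem(
    this=exp.EQ(
        this=exp.Parameter(this=exp.Var(this="x")),
        expression=exp.Literal.number(1),
    )
)
print(hive.generate(exp.Set(expressions=[item])))  # expected: SET x = 1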
def schema_sql(self, expression: sqlglot.expressions.Schema) -> str:
562        def schema_sql(self, expression: exp.Schema) -> str:
563            for ordered in expression.find_all(exp.Ordered):
564                if ordered.args.get("desc") is False:
565                    ordered.set("desc", None)
566
567            return super().schema_sql(expression)
def constraint_sql(self, expression: sqlglot.expressions.Constraint) -> str:
569        def constraint_sql(self, expression: exp.Constraint) -> str:
570            for prop in list(expression.find_all(exp.Properties)):
571                prop.pop()
572
573            this = self.sql(expression, "this")
574            expressions = self.expressions(expression, sep=" ", flat=True)
575            return f"CONSTRAINT {this} {expressions}"
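Usage sketch (hedged; the output shown is an assumption): named constraints keep their CONSTRAINT <name> <body> shape, with any attached Properties nodes stripped first.

from sqlglot import transpile

sql = "CREATE TABLE t (a INT, CONSTRAINT ck CHECK (a > 0))"
print(transpile(sql, write="hive")[0])
# e.g. CREATE TABLE t (a INT, CONSTRAINT ck CHECK (a > 0))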
def rowformatserdeproperty_sql(self, expression: sqlglot.expressions.RowFormatSerdeProperty) -> str:
577        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
578            serde_props = self.sql(expression, "serde_properties")
579            serde_props = f" {serde_props}" if serde_props else ""
580            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
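A hedged round-trip sketch (the SerDe class name is a placeholder and the output is an assumption):

from sqlglot import transpile

sql = "CREATE TABLE t (a INT) ROW FORMAT SERDE 'my.custom.SerDe'"
print(transpile(sql, read="hive", write="hive")[0])
# e.g. CREATE TABLE t (a INT) ROW FORMAT SERDE 'my.custom.SerDe'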
def arrayagg_sql(self, expression: sqlglot.expressions.ArrayAgg) -> str:
582        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
583            return self.func(
584                "COLLECT_LIST",
585                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
586            )
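A hedged sketch (expected output is an assumption): ARRAY_AGG is rendered as Hive's COLLECT_LIST, and an ORDER BY wrapped around the aggregated expression is discarded because COLLECT_LIST does not accept one.

from sqlglot import transpile

print(transpile("SELECT ARRAY_AGG(x) FROM t", write="hive")[0])
# e.g. SELECT COLLECT_LIST(x) FROM t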
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
588        def with_properties(self, properties: exp.Properties) -> str:
589            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
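A hedged sketch (the property name and expected output are assumptions): generic WITH (...) table properties are emitted as a TBLPROPERTIES clause.

from sqlglot import transpile

sql = "CREATE TABLE t (a INT) WITH (my_prop='1')"
print(transpile(sql, write="hive")[0])
# e.g. CREATE TABLE t (a INT) TBLPROPERTIES ('my_prop'='1')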
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
591        def datatype_sql(self, expression: exp.DataType) -> str:
592            if (
593                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
594                and not expression.expressions
595            ):
596                expression = exp.DataType.build("text")
597            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
598                expression.set("this", exp.DataType.Type.VARCHAR)
599            elif expression.this in exp.DataType.TEMPORAL_TYPES:
600                expression = exp.DataType.build(expression.this)
601            elif expression.is_type("float"):
602                size_expression = expression.find(exp.DataTypeParam)
603                if size_expression:
604                    size = int(size_expression.name)
605                    expression = (
606                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
607                    )
608
609            return super().datatype_sql(expression)
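A hedged sketch of the type rewrites (expected output is an assumption): an unsized VARCHAR widens to STRING, a sized VARCHAR is preserved, and a FLOAT whose precision exceeds 32 becomes DOUBLE.

from sqlglot import transpile

sql = "CREATE TABLE t (a VARCHAR, b VARCHAR(10), c FLOAT(53))"
print(transpile(sql, write="hive")[0])
# e.g. CREATE TABLE t (a STRING, b VARCHAR(10), c DOUBLE)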
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
611        def version_sql(self, expression: exp.Version) -> str:
612            sql = super().version_sql(expression)
613            return sql.replace("FOR ", "", 1)
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_ONLY_LITERALS
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
TZ_TO_WITH_TIME_ZONE
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
COLLATE_IS_FUNC
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
KEY_VALUE_DEFINITONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_columns_sql
star_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
nextvaluefor_sql
extract_sql
trim_sql
concat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
log_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql