From 918abde014f9e5c75dfbe21110c379f7f70435c9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 12 Feb 2023 11:06:28 +0100
Subject: Merging upstream version 11.0.1.

Signed-off-by: Daniel Baumann
---
 docs/sqlglot/dialects/spark.html | 1136 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 1136 insertions(+)
 create mode 100644 docs/sqlglot/dialects/spark.html

diff --git a/docs/sqlglot/dialects/spark.html b/docs/sqlglot/dialects/spark.html
new file mode 100644
index 0000000..75d5ac4
--- /dev/null
+++ b/docs/sqlglot/dialects/spark.html
@@ -0,0 +1,1136 @@

sqlglot.dialects.spark API documentation
from __future__ import annotations

from sqlglot import exp, parser
from sqlglot.dialects.dialect import create_with_partitions_sql, rename_func, trim_sql
from sqlglot.dialects.hive import Hive
from sqlglot.helper import seq_get


def _create_sql(self, e):
    kind = e.args.get("kind")
    temporary = e.args.get("temporary")

    # Spark expresses temporary tables as temporary views.
    if kind.upper() == "TABLE" and temporary is True:
        return f"CREATE TEMPORARY VIEW {self.sql(e, 'this')} AS {self.sql(e, 'expression')}"
    return create_with_partitions_sql(self, e)


def _map_sql(self, expression):
    # Spark builds map literals from parallel key and value arrays.
    keys = self.sql(expression.args["keys"])
    values = self.sql(expression.args["values"])
    return f"MAP_FROM_ARRAYS({keys}, {values})"


def _str_to_date(self, expression):
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    # The format argument can be dropped when it matches Hive's default date format.
    if time_format == Hive.date_format:
        return f"TO_DATE({this})"
    return f"TO_DATE({this}, {time_format})"


def _unix_to_time(self, expression):
    # Pick the TIMESTAMP_* builtin that matches the epoch scale.
    scale = expression.args.get("scale")
    timestamp = self.sql(expression, "this")
    if scale is None:
        return f"FROM_UNIXTIME({timestamp})"
    if scale == exp.UnixToTime.SECONDS:
        return f"TIMESTAMP_SECONDS({timestamp})"
    if scale == exp.UnixToTime.MILLIS:
        return f"TIMESTAMP_MILLIS({timestamp})"
    if scale == exp.UnixToTime.MICROS:
        return f"TIMESTAMP_MICROS({timestamp})"

    raise ValueError("Improper scale for timestamp")


class Spark(Hive):
    class Parser(Hive.Parser):
        FUNCTIONS = {
            **Hive.Parser.FUNCTIONS,  # type: ignore
            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
            # LEFT(x, n) is SUBSTRING(x, 1, n).
            "LEFT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Literal.number(1),
                length=seq_get(args, 1),
            ),
            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            # RIGHT(x, n) becomes SUBSTRING(x, LENGTH(x) - n + 1, n).
            "RIGHT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Sub(
                    this=exp.Length(this=seq_get(args, 0)),
                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                ),
                length=seq_get(args, 1),
            ),
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "IIF": exp.If.from_arg_list,
            "AGGREGATE": exp.Reduce.from_arg_list,
            # These functions accept dates, timestamps or strings, so the
            # argument is normalized through TsOrDsToDate.
            "DAYOFWEEK": lambda args: exp.DayOfWeek(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFMONTH": lambda args: exp.DayOfMonth(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFYEAR": lambda args: exp.DayOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
        }

        # Spark join hints (BROADCAST, MAPJOIN, ...) look like function calls
        # and are parsed into join hint nodes.
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
            "MERGE": lambda self: self._parse_join_hint("MERGE"),
            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
        }

        def _parse_add_column(self):
            # ALTER TABLE ... ADD COLUMNS (...)
            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()

        def _parse_drop_column(self):
            # ALTER TABLE ... DROP COLUMNS (...)
            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
                exp.Drop,
                this=self._parse_schema(),
                kind="COLUMNS",
            )

    class Generator(Hive.Generator):
        # Spark prefers BYTE/SHORT/LONG to TINYINT/SMALLINT/BIGINT.
        TYPE_MAPPING = {
            **Hive.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TINYINT: "BYTE",
            exp.DataType.Type.SMALLINT: "SHORT",
            exp.DataType.Type.BIGINT: "LONG",
        }

        PROPERTIES_LOCATION = {
            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
        }

        TRANSFORMS = {
            **Hive.Generator.TRANSFORMS,  # type: ignore
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
            exp.DateTrunc: rename_func("TRUNC"),
            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
            exp.StrToDate: _str_to_date,
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.UnixToTime: _unix_to_time,
            exp.Create: _create_sql,
            exp.Map: _map_sql,
            exp.Reduce: rename_func("AGGREGATE"),
            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
            exp.TimestampTrunc: lambda self, e: f"DATE_TRUNC({self.sql(e, 'unit')}, {self.sql(e, 'this')})",
            exp.Trim: trim_sql,
            exp.VariancePop: rename_func("VAR_POP"),
            exp.DateFromParts: rename_func("MAKE_DATE"),
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
        }
        # Fall back to the default generators for these expressions.
        TRANSFORMS.pop(exp.ArraySort)
        TRANSFORMS.pop(exp.ILike)

        WRAP_DERIVED_VALUES = False
        CREATE_FUNCTION_AS = False

        def cast_sql(self, expression: exp.Cast) -> str:
            # CAST(CAST(x AS JSON) AS <type>) collapses into FROM_JSON(x, '<type>').
            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
                exp.DataType.Type.JSON
            ):
                schema = f"'{self.sql(expression, 'to')}'"
                return f"FROM_JSON({self.format_args(self.sql(expression.this, 'this'), schema)})"
            # CAST(x AS JSON) becomes TO_JSON(x).
            if expression.to.is_type(exp.DataType.Type.JSON):
                return f"TO_JSON({self.sql(expression, 'this')})"

            return super(Spark.Generator, self).cast_sql(expression)

    class Tokenizer(Hive.Tokenizer):
        # Hex string literals are written X'...'.
        HEX_STRINGS = [("X'", "'")]
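As a quick usage sketch (not part of the module source; the sample statements are illustrative assumptions):

import sqlglot

# LEFT is lowered into a generic Substring at parse time, so it is expected
# to round-trip as SUBSTRING(x, 1, 3) in Spark output.
print(sqlglot.transpile("SELECT LEFT(x, 3)", read="spark", write="spark")[0])

# TYPE_MAPPING should render BIGINT as LONG for Spark.
print(sqlglot.transpile("CREATE TABLE t (a BIGINT)", write="spark")[0])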
+ +
class Spark.Parser(sqlglot.dialects.hive.Hive.Parser):
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

Arguments:

  • error_level: the desired error level. Default: ErrorLevel.RAISE.
  • error_message_context: the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  • index_offset: index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0.
  • alias_post_tablesample: whether the table alias comes after TABLESAMPLE. Default: False.
  • max_errors: maximum number of error messages to include in a raised ParseError. Only relevant if error_level is ErrorLevel.RAISE. Default: 3.
  • null_ordering: the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
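As a sketch of what the FUNCTIONS overrides above do at parse time (the query and column names are illustrative assumptions):

from sqlglot import parse_one

# LEFT is lowered into a generic Substring node and DAYOFWEEK wraps its
# argument in TsOrDsToDate, mirroring the FUNCTIONS table above.
ast = parse_one("SELECT LEFT(name, 2), DAYOFWEEK(dt)", read="spark")
print(repr(ast))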
class Spark.Generator(sqlglot.dialects.hive.Hive.Generator):
Generator interprets the given syntax tree and produces a SQL string as an output.

Arguments:

  • time_mapping (dict): the dictionary of custom time mappings in which the key represents a Python time format and the output the target time format.
  • time_trie (trie): a trie of the time_mapping keys.
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • quote_start (str): the starting character used to delimit quotes. Default: '.
  • quote_end (str): the ending character used to delimit quotes. Default: '.
  • identifier_start (str): the starting character used to delimit identifiers. Default: ".
  • identifier_end (str): the ending character used to delimit identifiers. Default: ".
  • identify (bool): if set to True all identifiers will be delimited by the corresponding character.
  • normalize (bool): if set to True all identifiers will be lower cased.
  • string_escape (str): the string escape character. Default: '.
  • identifier_escape (str): the identifier escape character. Default: ".
  • pad (int): the padding in a formatted string. Default: 2.
  • indent (int): the size of indentation in a formatted string. Default: 4.
  • unnest_column_only (bool): if True, unnest table aliases are considered only as column aliases.
  • normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper".
  • alias_post_tablesample (bool): whether the table alias comes after TABLESAMPLE. Default: False.
  • unsupported_level (ErrorLevel): the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • null_ordering (str): the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
  • max_unsupported (int): maximum number of unsupported messages to include in a raised UnsupportedError. Only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False.
  • max_text_width: the maximum number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: whether or not to preserve comments in the output SQL code. Default: True.
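A minimal generation sketch (the input query is an illustrative assumption; it is parsed with sqlglot's default dialect and generated for Spark):

import sqlglot

# exp.AtTimeZone is rendered via the TRANSFORMS entry above.
print(sqlglot.transpile("SELECT ts AT TIME ZONE 'UTC'", write="spark")[0])
# expected output: SELECT FROM_UTC_TIMESTAMP(ts, 'UTC')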
def cast_sql(self, expression: sqlglot.expressions.Cast) -> str:
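The override special-cases casts to and from the JSON type, as shown in the module source above. A hedged sketch (assuming JSON is accepted as a type name by this dialect's parser):

import sqlglot

# CAST(x AS JSON) is expected to come out as TO_JSON(x); re-casting a JSON
# value to another type should collapse into FROM_JSON(x, '<schema>').
print(sqlglot.transpile("SELECT CAST(x AS JSON)", read="spark", write="spark")[0])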
class Spark.Tokenizer(sqlglot.dialects.hive.Hive.Tokenizer):
    class Tokenizer(Hive.Tokenizer):
        HEX_STRINGS = [("X'", "'")]
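A small tokenizer sketch (the literal value is an illustrative assumption):

from sqlglot import parse_one

# HEX_STRINGS lets the tokenizer read X'1F' as a hex string literal, which
# surfaces as an exp.HexString node in the parsed tree.
ast = parse_one("SELECT X'1F'", read="spark")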
\ No newline at end of file
--
cgit v1.2.3