
sqlglot.dialects.spark2

from __future__ import annotations

import typing as t

from sqlglot import exp, parser
from sqlglot.dialects.dialect import create_with_partitions_sql, rename_func, trim_sql
from sqlglot.dialects.hive import Hive
from sqlglot.helper import seq_get


def _create_sql(self: Hive.Generator, e: exp.Create) -> str:
    kind = e.args["kind"]
    properties = e.args.get("properties")

    if kind.upper() == "TABLE" and any(
        isinstance(prop, exp.TemporaryProperty)
        for prop in (properties.expressions if properties else [])
    ):
        return f"CREATE TEMPORARY VIEW {self.sql(e, 'this')} AS {self.sql(e, 'expression')}"
    return create_with_partitions_sql(self, e)


def _map_sql(self: Hive.Generator, expression: exp.Map) -> str:
    keys = self.sql(expression.args["keys"])
    values = self.sql(expression.args["values"])
    return f"MAP_FROM_ARRAYS({keys}, {values})"


def _parse_as_cast(to_type: str) -> t.Callable[[t.Sequence], exp.Expression]:
    return lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build(to_type))


def _str_to_date(self: Hive.Generator, expression: exp.StrToDate) -> str:
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format == Hive.date_format:
        return f"TO_DATE({this})"
    return f"TO_DATE({this}, {time_format})"


def _unix_to_time_sql(self: Hive.Generator, expression: exp.UnixToTime) -> str:
    scale = expression.args.get("scale")
    timestamp = self.sql(expression, "this")
    if scale is None:
        return f"CAST(FROM_UNIXTIME({timestamp}) AS TIMESTAMP)"
    if scale == exp.UnixToTime.SECONDS:
        return f"TIMESTAMP_SECONDS({timestamp})"
    if scale == exp.UnixToTime.MILLIS:
        return f"TIMESTAMP_MILLIS({timestamp})"
    if scale == exp.UnixToTime.MICROS:
        return f"TIMESTAMP_MICROS({timestamp})"

    raise ValueError("Improper scale for timestamp")


class Spark2(Hive):
    class Parser(Hive.Parser):
        FUNCTIONS = {
            **Hive.Parser.FUNCTIONS,  # type: ignore
            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
            "LEFT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Literal.number(1),
                length=seq_get(args, 1),
            ),
            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            "RIGHT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Sub(
                    this=exp.Length(this=seq_get(args, 0)),
                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                ),
                length=seq_get(args, 1),
            ),
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "IIF": exp.If.from_arg_list,
            "AGGREGATE": exp.Reduce.from_arg_list,
            "DAYOFWEEK": lambda args: exp.DayOfWeek(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFMONTH": lambda args: exp.DayOfMonth(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFYEAR": lambda args: exp.DayOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")),
            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
                this=seq_get(args, 1),
                unit=exp.var(seq_get(args, 0)),
            ),
            "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)),
            "BOOLEAN": _parse_as_cast("boolean"),
            "DOUBLE": _parse_as_cast("double"),
            "FLOAT": _parse_as_cast("float"),
            "INT": _parse_as_cast("int"),
            "STRING": _parse_as_cast("string"),
            "TIMESTAMP": _parse_as_cast("timestamp"),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
            "MERGE": lambda self: self._parse_join_hint("MERGE"),
            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
        }

        def _parse_add_column(self) -> t.Optional[exp.Expression]:
            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()

        def _parse_drop_column(self) -> t.Optional[exp.Expression]:
            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
                exp.Drop,
                this=self._parse_schema(),
                kind="COLUMNS",
            )

        def _pivot_column_names(self, pivot_columns: t.List[exp.Expression]) -> t.List[str]:
            # Spark doesn't add a suffix to the pivot columns when there's a single aggregation
            if len(pivot_columns) == 1:
                return [""]

            names = []
            for agg in pivot_columns:
                if isinstance(agg, exp.Alias):
                    names.append(agg.alias)
                else:
                    """
                    This case corresponds to aggregations without aliases being used as suffixes
                    (e.g. col_avg(foo)). We need to unquote identifiers because they're going to
                    be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`.
                    Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes).

                    Moreover, function names are lowercased in order to mimic Spark's naming scheme.
                    """
                    agg_all_unquoted = agg.transform(
                        lambda node: exp.Identifier(this=node.name, quoted=False)
                        if isinstance(node, exp.Identifier)
                        else node
                    )
                    names.append(agg_all_unquoted.sql(dialect="spark", normalize_functions="lower"))

            return names

    class Generator(Hive.Generator):
        TYPE_MAPPING = {
            **Hive.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TINYINT: "BYTE",
            exp.DataType.Type.SMALLINT: "SHORT",
            exp.DataType.Type.BIGINT: "LONG",
        }

        PROPERTIES_LOCATION = {
            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
        }

        TRANSFORMS = {
            **Hive.Generator.TRANSFORMS,  # type: ignore
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")),
            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
            exp.StrToDate: _str_to_date,
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.UnixToTime: _unix_to_time_sql,
            exp.Create: _create_sql,
            exp.Map: _map_sql,
            exp.Reduce: rename_func("AGGREGATE"),
            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
            exp.TimestampTrunc: lambda self, e: self.func(
                "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this
            ),
            exp.Trim: trim_sql,
            exp.VariancePop: rename_func("VAR_POP"),
            exp.DateFromParts: rename_func("MAKE_DATE"),
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
        }
        TRANSFORMS.pop(exp.ArraySort)
        TRANSFORMS.pop(exp.ILike)

        WRAP_DERIVED_VALUES = False
        CREATE_FUNCTION_RETURN_AS = False

        def cast_sql(self, expression: exp.Cast) -> str:
            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
                exp.DataType.Type.JSON
            ):
                schema = f"'{self.sql(expression, 'to')}'"
                return self.func("FROM_JSON", expression.this.this, schema)
            if expression.to.is_type(exp.DataType.Type.JSON):
                return self.func("TO_JSON", expression.this)

            return super(Hive.Generator, self).cast_sql(expression)

    class Tokenizer(Hive.Tokenizer):
        HEX_STRINGS = [("X'", "'")]
class Spark2(sqlglot.dialects.hive.Hive):
class Spark2.Parser(sqlglot.dialects.hive.Hive.Parser):

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: the desired error level. Default: ErrorLevel.RAISE.
  • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  • index_offset: index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0.
  • alias_post_tablesample: whether the table alias comes after TABLESAMPLE. Default: False.
  • max_errors: maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3.
  • null_ordering: indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
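
For example, the "DATE_TRUNC" entry in FUNCTIONS above swaps Spark's argument order into sqlglot's canonical exp.TimestampTrunc node. A hedged sketch (the column and table names are placeholders):

import sqlglot
from sqlglot import exp

# DATE_TRUNC('month', created_at) parses into exp.TimestampTrunc, with the
# unit and the truncated expression in sqlglot's canonical positions.
select = sqlglot.parse_one("SELECT DATE_TRUNC('month', created_at) FROM t", read="spark2")
trunc = select.find(exp.TimestampTrunc)
print(trunc.sql(dialect="spark2"))  # e.g. DATE_TRUNC('month', created_at)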
class Spark2.Generator(sqlglot.dialects.hive.Hive.Generator):

Generator interprets the given syntax tree and produces a SQL string as an output.

Arguments:
  • time_mapping (dict): the dictionary of custom time mappings in which the key represents a Python time format and the value the target time format.
  • time_trie (trie): a trie of the time_mapping keys.
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • identify (bool | str): 'always': always quote; 'safe': quote identifiers if they don't contain an uppercase character. True defaults to 'always'.
  • normalize (bool): if set to True, all identifiers will be lowercased.
  • string_escape (str): specifies a string escape character. Default: '.
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • pad (int): determines padding in a formatted string. Default: 2.
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • unnest_column_only (bool): if True, UNNEST table aliases are considered only as column aliases.
  • normalize_functions (str): how to normalize function names, "upper", "lower", or None. Default: "upper".
  • alias_post_tablesample (bool): whether the table alias comes after TABLESAMPLE. Default: False.
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • null_ordering (str): indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
  • max_unsupported (int): maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False.
  • max_text_width: the maximum number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: whether or not to preserve comments in the output SQL code. Default: True.
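
As a small illustration of the TRANSFORMS table, exp.BitwiseLeftShift is rendered via rename_func("SHIFTLEFT"), so transpiling from a dialect that writes the << operator produces Spark's function form. A sketch (assuming sqlglot is installed; exact formatting may vary by version):

import sqlglot

# The base parser turns 1 << 2 into exp.BitwiseLeftShift; Spark2.Generator
# then renders that node as a SHIFTLEFT call.
print(sqlglot.transpile("SELECT 1 << 2", read="duckdb", write="spark2")[0])
# e.g. SELECT SHIFTLEFT(1, 2)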
def cast_sql(self, expression: sqlglot.expressions.Cast) -> str:
        def cast_sql(self, expression: exp.Cast) -> str:
            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
                exp.DataType.Type.JSON
            ):
                schema = f"'{self.sql(expression, 'to')}'"
                return self.func("FROM_JSON", expression.this.this, schema)
            if expression.to.is_type(exp.DataType.Type.JSON):
                return self.func("TO_JSON", expression.this)

            return super(Hive.Generator, self).cast_sql(expression)
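
This override handles the JSON type, which Spark 2 lacks: a cast to JSON is rewritten as a TO_JSON call, and a cast of an inner JSON-cast value back to a concrete type becomes FROM_JSON with the target type passed as a schema string. A hedged sketch (the column name is a placeholder; output is indicative):

import sqlglot

# CAST(payload AS JSON) has no direct Spark 2 equivalent, so it is rewritten.
print(sqlglot.transpile("SELECT CAST(payload AS JSON) FROM t", write="spark2")[0])
# e.g. SELECT TO_JSON(payload) FROM t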
Inherited Members
sqlglot.generator.Generator
Generator
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
describe_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
afterjournalproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
select_sql
schema_sql
star_sql
structkwarg_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
concat_sql
check_sql
foreignkey_sql
primarykey_sql
unique_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
sqlglot.dialects.hive.Hive.Generator
arrayagg_sql
with_properties
datatype_sql
class Spark2.Tokenizer(sqlglot.dialects.hive.Hive.Tokenizer):
    class Tokenizer(Hive.Tokenizer):
        HEX_STRINGS = [("X'", "'")]
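
The only tokenizer customization is HEX_STRINGS, which makes X'...' literals scan as hexadecimal strings, matching Spark's syntax. A quick sketch (assuming sqlglot is installed):

import sqlglot

# X'1F' is recognized as a single hex-string literal rather than an
# identifier followed by a quoted string.
expr = sqlglot.parse_one("SELECT X'1F'", read="spark2")
print(expr.sql(dialect="spark2"))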