sqlglot.dialects.spark
from __future__ import annotations

import typing as t

from sqlglot import exp, parser
from sqlglot.dialects.dialect import create_with_partitions_sql, rename_func, trim_sql
from sqlglot.dialects.hive import Hive
from sqlglot.helper import seq_get


def _create_sql(self: Hive.Generator, e: exp.Create) -> str:
    kind = e.args["kind"]
    properties = e.args.get("properties")

    if kind.upper() == "TABLE" and any(
        isinstance(prop, exp.TemporaryProperty)
        for prop in (properties.expressions if properties else [])
    ):
        return f"CREATE TEMPORARY VIEW {self.sql(e, 'this')} AS {self.sql(e, 'expression')}"
    return create_with_partitions_sql(self, e)


def _map_sql(self: Hive.Generator, expression: exp.Map) -> str:
    keys = self.sql(expression.args["keys"])
    values = self.sql(expression.args["values"])
    return f"MAP_FROM_ARRAYS({keys}, {values})"


def _str_to_date(self: Hive.Generator, expression: exp.StrToDate) -> str:
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format == Hive.date_format:
        return f"TO_DATE({this})"
    return f"TO_DATE({this}, {time_format})"


def _unix_to_time_sql(self: Hive.Generator, expression: exp.UnixToTime) -> str:
    scale = expression.args.get("scale")
    timestamp = self.sql(expression, "this")
    if scale is None:
        return f"FROM_UNIXTIME({timestamp})"
    if scale == exp.UnixToTime.SECONDS:
        return f"TIMESTAMP_SECONDS({timestamp})"
    if scale == exp.UnixToTime.MILLIS:
        return f"TIMESTAMP_MILLIS({timestamp})"
    if scale == exp.UnixToTime.MICROS:
        return f"TIMESTAMP_MICROS({timestamp})"

    raise ValueError("Improper scale for timestamp")


class Spark(Hive):
    class Parser(Hive.Parser):
        FUNCTIONS = {
            **Hive.Parser.FUNCTIONS,  # type: ignore
            "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
            "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
            "LEFT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Literal.number(1),
                length=seq_get(args, 1),
            ),
            "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            "SHIFTRIGHT": lambda args: exp.BitwiseRightShift(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
            ),
            "RIGHT": lambda args: exp.Substring(
                this=seq_get(args, 0),
                start=exp.Sub(
                    this=exp.Length(this=seq_get(args, 0)),
                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                ),
                length=seq_get(args, 1),
            ),
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "BOOLEAN": lambda args: exp.Cast(
                this=seq_get(args, 0), to=exp.DataType.build("boolean")
            ),
            "IIF": exp.If.from_arg_list,
            "INT": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("int")),
            "AGGREGATE": exp.Reduce.from_arg_list,
            "DAYOFWEEK": lambda args: exp.DayOfWeek(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFMONTH": lambda args: exp.DayOfMonth(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DAYOFYEAR": lambda args: exp.DayOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "WEEKOFYEAR": lambda args: exp.WeekOfYear(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
            ),
            "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")),
            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
                this=seq_get(args, 1),
                unit=exp.var(seq_get(args, 0)),
            ),
            "STRING": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("string")),
            "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)),
            "TIMESTAMP": lambda args: exp.Cast(
                this=seq_get(args, 0), to=exp.DataType.build("timestamp")
            ),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
            "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"),
            "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"),
            "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"),
            "MERGE": lambda self: self._parse_join_hint("MERGE"),
            "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"),
            "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"),
            "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"),
            "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"),
        }

        def _parse_add_column(self) -> t.Optional[exp.Expression]:
            return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema()

        def _parse_drop_column(self) -> t.Optional[exp.Expression]:
            return self._match_text_seq("DROP", "COLUMNS") and self.expression(
                exp.Drop,
                this=self._parse_schema(),
                kind="COLUMNS",
            )

        def _pivot_column_names(self, pivot_columns: t.List[exp.Expression]) -> t.List[str]:
            # Spark doesn't add a suffix to the pivot columns when there's a single aggregation
            if len(pivot_columns) == 1:
                return [""]

            names = []
            for agg in pivot_columns:
                if isinstance(agg, exp.Alias):
                    names.append(agg.alias)
                else:
                    """
                    This case corresponds to aggregations without aliases being used as suffixes
                    (e.g. col_avg(foo)). We need to unquote identifiers because they're going to
                    be quoted in the base parser's `_parse_pivot` method, due to `to_identifier`.
                    Otherwise, we'd end up with `col_avg(`foo`)` (notice the double quotes).

                    Moreover, function names are lowercased in order to mimic Spark's naming scheme.
                    """
                    agg_all_unquoted = agg.transform(
                        lambda node: exp.Identifier(this=node.name, quoted=False)
                        if isinstance(node, exp.Identifier)
                        else node
                    )
                    names.append(agg_all_unquoted.sql(dialect="spark", normalize_functions="lower"))

            return names

    class Generator(Hive.Generator):
        TYPE_MAPPING = {
            **Hive.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TINYINT: "BYTE",
            exp.DataType.Type.SMALLINT: "SHORT",
            exp.DataType.Type.BIGINT: "LONG",
        }

        PROPERTIES_LOCATION = {
            **Hive.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.EngineProperty: exp.Properties.Location.UNSUPPORTED,
            exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.CollateProperty: exp.Properties.Location.UNSUPPORTED,
        }

        TRANSFORMS = {
            **Hive.Generator.TRANSFORMS,  # type: ignore
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}",
            exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
            exp.BitwiseLeftShift: rename_func("SHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("SHIFTRIGHT"),
            exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")),
            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
            exp.StrToDate: _str_to_date,
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.UnixToTime: _unix_to_time_sql,
            exp.Create: _create_sql,
            exp.Map: _map_sql,
            exp.Reduce: rename_func("AGGREGATE"),
            exp.StructKwarg: lambda self, e: f"{self.sql(e, 'this')}: {self.sql(e, 'expression')}",
            exp.TimestampTrunc: lambda self, e: self.func(
                "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this
            ),
            exp.Trim: trim_sql,
            exp.VariancePop: rename_func("VAR_POP"),
            exp.DateFromParts: rename_func("MAKE_DATE"),
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})",
        }
        TRANSFORMS.pop(exp.ArraySort)
        TRANSFORMS.pop(exp.ILike)

        WRAP_DERIVED_VALUES = False
        CREATE_FUNCTION_RETURN_AS = False

        def cast_sql(self, expression: exp.Cast) -> str:
            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
                exp.DataType.Type.JSON
            ):
                schema = f"'{self.sql(expression, 'to')}'"
                return self.func("FROM_JSON", expression.this.this, schema)
            if expression.to.is_type(exp.DataType.Type.JSON):
                return self.func("TO_JSON", expression.this)

            return super(Spark.Generator, self).cast_sql(expression)

    class Tokenizer(Hive.Tokenizer):
        HEX_STRINGS = [("X'", "'")]
class Spark(Hive):
class Spark.Parser(Hive.Parser):
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE.
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: index offset for arrays, e.g. ARRAY[0] vs. ARRAY[1] as the head of a list. Default: 0.
- alias_post_tablesample: whether the table alias comes after tablesample. Default: False.
- max_errors: maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3.
- null_ordering: indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small".
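To make the Spark-specific FUNCTIONS and FUNCTION_PARSERS entries shown in the module source above concrete, a small sketch; the commented results follow from the mappings and may vary by sqlglot version:

from sqlglot import exp, parse_one

# SHIFTLEFT is parsed into the dialect-agnostic BitwiseLeftShift node...
ast = parse_one("SELECT SHIFTLEFT(x, 2)", read="spark")
assert ast.find(exp.BitwiseLeftShift) is not None

# ...so other dialects can render it with their own syntax.
print(ast.sql())
# SELECT x << 2

# BROADCAST(...) inside a hint comment is parsed as a join hint, not a function.
print(parse_one("SELECT /*+ BROADCAST(t) */ * FROM t", read="spark").sql(dialect="spark"))
# SELECT /*+ BROADCAST(t) */ * FROM t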
class Spark.Generator(Hive.Generator):
Generator interprets the given syntax tree and produces a SQL string as output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a Python time format and the value the target time format.
- time_trie (trie): a trie of the time_mapping keys.
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- identify (bool | str): 'always': always quote; 'safe': quote identifiers if they don't contain an uppercase character; True defaults to 'always'.
- normalize (bool): if set to True, all identifiers will be lowercased.
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if true, unnest table aliases are considered only as column aliases.
- normalize_functions (str): normalize function names, "upper", "lower", or None. Default: "upper".
- alias_post_tablesample (bool): whether the table alias comes after tablesample. Default: False.
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): whether the comma is leading or trailing in select statements. Default: False.
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
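A brief sketch of the Spark-specific generation rules (TYPE_MAPPING and TRANSFORMS) defined above; the commented outputs follow from those tables, though exact rendering may vary by sqlglot version:

from sqlglot import parse_one

# TYPE_MAPPING: Spark spells the small integer types BYTE/SHORT/LONG.
print(parse_one("CAST(x AS TINYINT)").sql(dialect="spark"))
# CAST(x AS BYTE)

# TRANSFORMS: exp.ApproxDistinct is renamed to Spark's APPROX_COUNT_DISTINCT.
print(parse_one("SELECT APPROX_DISTINCT(x) FROM t").sql(dialect="spark"))
# SELECT APPROX_COUNT_DISTINCT(x) FROM t

# TRANSFORMS: AT TIME ZONE becomes FROM_UTC_TIMESTAMP.
print(parse_one("ts AT TIME ZONE 'UTC'").sql(dialect="spark"))
# FROM_UTC_TIMESTAMP(ts, 'UTC')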
def cast_sql(self, expression: exp.Cast) -> str:
    if isinstance(expression.this, exp.Cast) and expression.this.is_type(
        exp.DataType.Type.JSON
    ):
        schema = f"'{self.sql(expression, 'to')}'"
        return self.func("FROM_JSON", expression.this.this, schema)
    if expression.to.is_type(exp.DataType.Type.JSON):
        return self.func("TO_JSON", expression.this)

    return super(Spark.Generator, self).cast_sql(expression)
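For instance, a sketch of the two JSON special cases; the quoting follows the code above:

from sqlglot import parse_one

# Casting to JSON is emitted as TO_JSON(...).
print(parse_one("CAST(s AS JSON)").sql(dialect="spark"))
# TO_JSON(s)

# A cast layered on top of a JSON cast becomes FROM_JSON, with the outer
# target type passed as the schema string.
print(parse_one("CAST(CAST(s AS JSON) AS ARRAY<INT>)").sql(dialect="spark"))
# FROM_JSON(s, 'ARRAY<INT>')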
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- afterjournalproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- select_sql
- schema_sql
- star_sql
- structkwarg_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- window_spec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql