sqlglot.dialects.spark2
1from __future__ import annotations 2 3import typing as t 4 5from sqlglot import exp, parser, transforms 6from sqlglot.dialects.dialect import ( 7 create_with_partitions_sql, 8 pivot_column_names, 9 rename_func, 10 trim_sql, 11) 12from sqlglot.dialects.hive import Hive 13from sqlglot.helper import seq_get 14 15 16def _create_sql(self: Hive.Generator, e: exp.Create) -> str: 17 kind = e.args["kind"] 18 properties = e.args.get("properties") 19 20 if kind.upper() == "TABLE" and any( 21 isinstance(prop, exp.TemporaryProperty) 22 for prop in (properties.expressions if properties else []) 23 ): 24 return f"CREATE TEMPORARY VIEW {self.sql(e, 'this')} AS {self.sql(e, 'expression')}" 25 return create_with_partitions_sql(self, e) 26 27 28def _map_sql(self: Hive.Generator, expression: exp.Map) -> str: 29 keys = self.sql(expression.args["keys"]) 30 values = self.sql(expression.args["values"]) 31 return f"MAP_FROM_ARRAYS({keys}, {values})" 32 33 34def _parse_as_cast(to_type: str) -> t.Callable[[t.List], exp.Expression]: 35 return lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build(to_type)) 36 37 38def _str_to_date(self: Hive.Generator, expression: exp.StrToDate) -> str: 39 this = self.sql(expression, "this") 40 time_format = self.format_time(expression) 41 if time_format == Hive.date_format: 42 return f"TO_DATE({this})" 43 return f"TO_DATE({this}, {time_format})" 44 45 46def _unix_to_time_sql(self: Hive.Generator, expression: exp.UnixToTime) -> str: 47 scale = expression.args.get("scale") 48 timestamp = self.sql(expression, "this") 49 if scale is None: 50 return f"CAST(FROM_UNIXTIME({timestamp}) AS TIMESTAMP)" 51 if scale == exp.UnixToTime.SECONDS: 52 return f"TIMESTAMP_SECONDS({timestamp})" 53 if scale == exp.UnixToTime.MILLIS: 54 return f"TIMESTAMP_MILLIS({timestamp})" 55 if scale == exp.UnixToTime.MICROS: 56 return f"TIMESTAMP_MICROS({timestamp})" 57 58 raise ValueError("Improper scale for timestamp") 59 60 61def _unalias_pivot(expression: exp.Expression) -> 
exp.Expression: 62 """ 63 Spark doesn't allow PIVOT aliases, so we need to remove them and possibly wrap a 64 pivoted source in a subquery with the same alias to preserve the query's semantics. 65 66 Example: 67 >>> from sqlglot import parse_one 68 >>> expr = parse_one("SELECT piv.x FROM tbl PIVOT (SUM(a) FOR b IN ('x')) piv") 69 >>> print(_unalias_pivot(expr).sql(dialect="spark")) 70 SELECT piv.x FROM (SELECT * FROM tbl PIVOT(SUM(a) FOR b IN ('x'))) AS piv 71 """ 72 if isinstance(expression, exp.From) and expression.this.args.get("pivots"): 73 pivot = expression.this.args["pivots"][0] 74 if pivot.alias: 75 alias = pivot.args["alias"].pop() 76 return exp.From( 77 this=expression.this.replace( 78 exp.select("*").from_(expression.this.copy()).subquery(alias=alias) 79 ) 80 ) 81 82 return expression 83 84 85def _unqualify_pivot_columns(expression: exp.Expression) -> exp.Expression: 86 """ 87 Spark doesn't allow the column referenced in the PIVOT's field to be qualified, 88 so we need to unqualify it. 
89 90 Example: 91 >>> from sqlglot import parse_one 92 >>> expr = parse_one("SELECT * FROM tbl PIVOT (SUM(tbl.sales) FOR tbl.quarter IN ('Q1', 'Q2'))") 93 >>> print(_unqualify_pivot_columns(expr).sql(dialect="spark")) 94 SELECT * FROM tbl PIVOT(SUM(tbl.sales) FOR quarter IN ('Q1', 'Q1')) 95 """ 96 if isinstance(expression, exp.Pivot): 97 expression.args["field"].transform( 98 lambda node: exp.column(node.output_name, quoted=node.this.quoted) 99 if isinstance(node, exp.Column) 100 else node, 101 copy=False, 102 ) 103 104 return expression 105 106 107class Spark2(Hive): 108 class Parser(Hive.Parser): 109 FUNCTIONS = { 110 **Hive.Parser.FUNCTIONS, 111 "MAP_FROM_ARRAYS": exp.Map.from_arg_list, 112 "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list, 113 "LEFT": lambda args: exp.Substring( 114 this=seq_get(args, 0), 115 start=exp.Literal.number(1), 116 length=seq_get(args, 1), 117 ), 118 "SHIFTLEFT": lambda args: exp.BitwiseLeftShift( 119 this=seq_get(args, 0), 120 expression=seq_get(args, 1), 121 ), 122 "SHIFTRIGHT": lambda args: exp.BitwiseRightShift( 123 this=seq_get(args, 0), 124 expression=seq_get(args, 1), 125 ), 126 "RIGHT": lambda args: exp.Substring( 127 this=seq_get(args, 0), 128 start=exp.Sub( 129 this=exp.Length(this=seq_get(args, 0)), 130 expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)), 131 ), 132 length=seq_get(args, 1), 133 ), 134 "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list, 135 "IIF": exp.If.from_arg_list, 136 "AGGREGATE": exp.Reduce.from_arg_list, 137 "DAYOFWEEK": lambda args: exp.DayOfWeek( 138 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 139 ), 140 "DAYOFMONTH": lambda args: exp.DayOfMonth( 141 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 142 ), 143 "DAYOFYEAR": lambda args: exp.DayOfYear( 144 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 145 ), 146 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 147 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 148 ), 149 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), 
to=exp.DataType.build("date")), 150 "DATE_TRUNC": lambda args: exp.TimestampTrunc( 151 this=seq_get(args, 1), 152 unit=exp.var(seq_get(args, 0)), 153 ), 154 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), 155 "BOOLEAN": _parse_as_cast("boolean"), 156 "DOUBLE": _parse_as_cast("double"), 157 "FLOAT": _parse_as_cast("float"), 158 "INT": _parse_as_cast("int"), 159 "STRING": _parse_as_cast("string"), 160 "TIMESTAMP": _parse_as_cast("timestamp"), 161 } 162 163 FUNCTION_PARSERS = { 164 **parser.Parser.FUNCTION_PARSERS, 165 "BROADCAST": lambda self: self._parse_join_hint("BROADCAST"), 166 "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"), 167 "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"), 168 "MERGE": lambda self: self._parse_join_hint("MERGE"), 169 "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"), 170 "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"), 171 "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"), 172 "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"), 173 } 174 175 def _parse_add_column(self) -> t.Optional[exp.Expression]: 176 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 177 178 def _parse_drop_column(self) -> t.Optional[exp.Expression]: 179 return self._match_text_seq("DROP", "COLUMNS") and self.expression( 180 exp.Drop, 181 this=self._parse_schema(), 182 kind="COLUMNS", 183 ) 184 185 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: 186 if len(aggregations) == 1: 187 return [""] 188 return pivot_column_names(aggregations, dialect="spark") 189 190 class Generator(Hive.Generator): 191 TYPE_MAPPING = { 192 **Hive.Generator.TYPE_MAPPING, 193 exp.DataType.Type.TINYINT: "BYTE", 194 exp.DataType.Type.SMALLINT: "SHORT", 195 exp.DataType.Type.BIGINT: "LONG", 196 } 197 198 PROPERTIES_LOCATION = { 199 **Hive.Generator.PROPERTIES_LOCATION, 200 exp.EngineProperty: 
exp.Properties.Location.UNSUPPORTED, 201 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, 202 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, 203 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, 204 } 205 206 TRANSFORMS = { 207 **Hive.Generator.TRANSFORMS, 208 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 209 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", 210 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", 211 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), 212 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), 213 exp.Create: _create_sql, 214 exp.DateFromParts: rename_func("MAKE_DATE"), 215 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), 216 exp.DayOfMonth: rename_func("DAYOFMONTH"), 217 exp.DayOfWeek: rename_func("DAYOFWEEK"), 218 exp.DayOfYear: rename_func("DAYOFYEAR"), 219 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", 220 exp.From: transforms.preprocess([_unalias_pivot]), 221 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", 222 exp.LogicalAnd: rename_func("BOOL_AND"), 223 exp.LogicalOr: rename_func("BOOL_OR"), 224 exp.Map: _map_sql, 225 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), 226 exp.Reduce: rename_func("AGGREGATE"), 227 exp.StrToDate: _str_to_date, 228 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", 229 exp.TimestampTrunc: lambda self, e: self.func( 230 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this 231 ), 232 exp.Trim: trim_sql, 233 exp.UnixToTime: _unix_to_time_sql, 234 exp.VariancePop: rename_func("VAR_POP"), 235 exp.WeekOfYear: rename_func("WEEKOFYEAR"), 236 exp.WithinGroup: transforms.preprocess( 237 [transforms.remove_within_group_for_percentiles] 238 ), 239 } 240 TRANSFORMS.pop(exp.ArrayJoin) 241 TRANSFORMS.pop(exp.ArraySort) 242 
TRANSFORMS.pop(exp.ILike) 243 244 WRAP_DERIVED_VALUES = False 245 CREATE_FUNCTION_RETURN_AS = False 246 247 def cast_sql(self, expression: exp.Cast) -> str: 248 if isinstance(expression.this, exp.Cast) and expression.this.is_type( 249 exp.DataType.Type.JSON 250 ): 251 schema = f"'{self.sql(expression, 'to')}'" 252 return self.func("FROM_JSON", expression.this.this, schema) 253 if expression.to.is_type(exp.DataType.Type.JSON): 254 return self.func("TO_JSON", expression.this) 255 256 return super(Hive.Generator, self).cast_sql(expression) 257 258 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: 259 return super().columndef_sql( 260 expression, 261 sep=": " 262 if isinstance(expression.parent, exp.DataType) 263 and expression.parent.is_type(exp.DataType.Type.STRUCT) 264 else sep, 265 ) 266 267 class Tokenizer(Hive.Tokenizer): 268 HEX_STRINGS = [("X'", "'")]
108class Spark2(Hive): 109 class Parser(Hive.Parser): 110 FUNCTIONS = { 111 **Hive.Parser.FUNCTIONS, 112 "MAP_FROM_ARRAYS": exp.Map.from_arg_list, 113 "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list, 114 "LEFT": lambda args: exp.Substring( 115 this=seq_get(args, 0), 116 start=exp.Literal.number(1), 117 length=seq_get(args, 1), 118 ), 119 "SHIFTLEFT": lambda args: exp.BitwiseLeftShift( 120 this=seq_get(args, 0), 121 expression=seq_get(args, 1), 122 ), 123 "SHIFTRIGHT": lambda args: exp.BitwiseRightShift( 124 this=seq_get(args, 0), 125 expression=seq_get(args, 1), 126 ), 127 "RIGHT": lambda args: exp.Substring( 128 this=seq_get(args, 0), 129 start=exp.Sub( 130 this=exp.Length(this=seq_get(args, 0)), 131 expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)), 132 ), 133 length=seq_get(args, 1), 134 ), 135 "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list, 136 "IIF": exp.If.from_arg_list, 137 "AGGREGATE": exp.Reduce.from_arg_list, 138 "DAYOFWEEK": lambda args: exp.DayOfWeek( 139 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 140 ), 141 "DAYOFMONTH": lambda args: exp.DayOfMonth( 142 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 143 ), 144 "DAYOFYEAR": lambda args: exp.DayOfYear( 145 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 146 ), 147 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 148 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 149 ), 150 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), 151 "DATE_TRUNC": lambda args: exp.TimestampTrunc( 152 this=seq_get(args, 1), 153 unit=exp.var(seq_get(args, 0)), 154 ), 155 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), 156 "BOOLEAN": _parse_as_cast("boolean"), 157 "DOUBLE": _parse_as_cast("double"), 158 "FLOAT": _parse_as_cast("float"), 159 "INT": _parse_as_cast("int"), 160 "STRING": _parse_as_cast("string"), 161 "TIMESTAMP": _parse_as_cast("timestamp"), 162 } 163 164 FUNCTION_PARSERS = { 165 **parser.Parser.FUNCTION_PARSERS, 166 
"BROADCAST": lambda self: self._parse_join_hint("BROADCAST"), 167 "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"), 168 "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"), 169 "MERGE": lambda self: self._parse_join_hint("MERGE"), 170 "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"), 171 "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"), 172 "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"), 173 "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"), 174 } 175 176 def _parse_add_column(self) -> t.Optional[exp.Expression]: 177 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 178 179 def _parse_drop_column(self) -> t.Optional[exp.Expression]: 180 return self._match_text_seq("DROP", "COLUMNS") and self.expression( 181 exp.Drop, 182 this=self._parse_schema(), 183 kind="COLUMNS", 184 ) 185 186 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: 187 if len(aggregations) == 1: 188 return [""] 189 return pivot_column_names(aggregations, dialect="spark") 190 191 class Generator(Hive.Generator): 192 TYPE_MAPPING = { 193 **Hive.Generator.TYPE_MAPPING, 194 exp.DataType.Type.TINYINT: "BYTE", 195 exp.DataType.Type.SMALLINT: "SHORT", 196 exp.DataType.Type.BIGINT: "LONG", 197 } 198 199 PROPERTIES_LOCATION = { 200 **Hive.Generator.PROPERTIES_LOCATION, 201 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, 202 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, 203 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, 204 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, 205 } 206 207 TRANSFORMS = { 208 **Hive.Generator.TRANSFORMS, 209 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 210 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", 211 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 
'zone')})", 212 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), 213 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), 214 exp.Create: _create_sql, 215 exp.DateFromParts: rename_func("MAKE_DATE"), 216 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), 217 exp.DayOfMonth: rename_func("DAYOFMONTH"), 218 exp.DayOfWeek: rename_func("DAYOFWEEK"), 219 exp.DayOfYear: rename_func("DAYOFYEAR"), 220 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", 221 exp.From: transforms.preprocess([_unalias_pivot]), 222 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", 223 exp.LogicalAnd: rename_func("BOOL_AND"), 224 exp.LogicalOr: rename_func("BOOL_OR"), 225 exp.Map: _map_sql, 226 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), 227 exp.Reduce: rename_func("AGGREGATE"), 228 exp.StrToDate: _str_to_date, 229 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", 230 exp.TimestampTrunc: lambda self, e: self.func( 231 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this 232 ), 233 exp.Trim: trim_sql, 234 exp.UnixToTime: _unix_to_time_sql, 235 exp.VariancePop: rename_func("VAR_POP"), 236 exp.WeekOfYear: rename_func("WEEKOFYEAR"), 237 exp.WithinGroup: transforms.preprocess( 238 [transforms.remove_within_group_for_percentiles] 239 ), 240 } 241 TRANSFORMS.pop(exp.ArrayJoin) 242 TRANSFORMS.pop(exp.ArraySort) 243 TRANSFORMS.pop(exp.ILike) 244 245 WRAP_DERIVED_VALUES = False 246 CREATE_FUNCTION_RETURN_AS = False 247 248 def cast_sql(self, expression: exp.Cast) -> str: 249 if isinstance(expression.this, exp.Cast) and expression.this.is_type( 250 exp.DataType.Type.JSON 251 ): 252 schema = f"'{self.sql(expression, 'to')}'" 253 return self.func("FROM_JSON", expression.this.this, schema) 254 if expression.to.is_type(exp.DataType.Type.JSON): 255 return self.func("TO_JSON", expression.this) 256 257 return super(Hive.Generator, self).cast_sql(expression) 258 259 def columndef_sql(self, 
expression: exp.ColumnDef, sep: str = " ") -> str: 260 return super().columndef_sql( 261 expression, 262 sep=": " 263 if isinstance(expression.parent, exp.DataType) 264 and expression.parent.is_type(exp.DataType.Type.STRUCT) 265 else sep, 266 ) 267 268 class Tokenizer(Hive.Tokenizer): 269 HEX_STRINGS = [("X'", "'")]
109 class Parser(Hive.Parser): 110 FUNCTIONS = { 111 **Hive.Parser.FUNCTIONS, 112 "MAP_FROM_ARRAYS": exp.Map.from_arg_list, 113 "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list, 114 "LEFT": lambda args: exp.Substring( 115 this=seq_get(args, 0), 116 start=exp.Literal.number(1), 117 length=seq_get(args, 1), 118 ), 119 "SHIFTLEFT": lambda args: exp.BitwiseLeftShift( 120 this=seq_get(args, 0), 121 expression=seq_get(args, 1), 122 ), 123 "SHIFTRIGHT": lambda args: exp.BitwiseRightShift( 124 this=seq_get(args, 0), 125 expression=seq_get(args, 1), 126 ), 127 "RIGHT": lambda args: exp.Substring( 128 this=seq_get(args, 0), 129 start=exp.Sub( 130 this=exp.Length(this=seq_get(args, 0)), 131 expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)), 132 ), 133 length=seq_get(args, 1), 134 ), 135 "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list, 136 "IIF": exp.If.from_arg_list, 137 "AGGREGATE": exp.Reduce.from_arg_list, 138 "DAYOFWEEK": lambda args: exp.DayOfWeek( 139 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 140 ), 141 "DAYOFMONTH": lambda args: exp.DayOfMonth( 142 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 143 ), 144 "DAYOFYEAR": lambda args: exp.DayOfYear( 145 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 146 ), 147 "WEEKOFYEAR": lambda args: exp.WeekOfYear( 148 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 149 ), 150 "DATE": lambda args: exp.Cast(this=seq_get(args, 0), to=exp.DataType.build("date")), 151 "DATE_TRUNC": lambda args: exp.TimestampTrunc( 152 this=seq_get(args, 1), 153 unit=exp.var(seq_get(args, 0)), 154 ), 155 "TRUNC": lambda args: exp.DateTrunc(unit=seq_get(args, 1), this=seq_get(args, 0)), 156 "BOOLEAN": _parse_as_cast("boolean"), 157 "DOUBLE": _parse_as_cast("double"), 158 "FLOAT": _parse_as_cast("float"), 159 "INT": _parse_as_cast("int"), 160 "STRING": _parse_as_cast("string"), 161 "TIMESTAMP": _parse_as_cast("timestamp"), 162 } 163 164 FUNCTION_PARSERS = { 165 **parser.Parser.FUNCTION_PARSERS, 166 "BROADCAST": lambda self: 
self._parse_join_hint("BROADCAST"), 167 "BROADCASTJOIN": lambda self: self._parse_join_hint("BROADCASTJOIN"), 168 "MAPJOIN": lambda self: self._parse_join_hint("MAPJOIN"), 169 "MERGE": lambda self: self._parse_join_hint("MERGE"), 170 "SHUFFLEMERGE": lambda self: self._parse_join_hint("SHUFFLEMERGE"), 171 "MERGEJOIN": lambda self: self._parse_join_hint("MERGEJOIN"), 172 "SHUFFLE_HASH": lambda self: self._parse_join_hint("SHUFFLE_HASH"), 173 "SHUFFLE_REPLICATE_NL": lambda self: self._parse_join_hint("SHUFFLE_REPLICATE_NL"), 174 } 175 176 def _parse_add_column(self) -> t.Optional[exp.Expression]: 177 return self._match_text_seq("ADD", "COLUMNS") and self._parse_schema() 178 179 def _parse_drop_column(self) -> t.Optional[exp.Expression]: 180 return self._match_text_seq("DROP", "COLUMNS") and self.expression( 181 exp.Drop, 182 this=self._parse_schema(), 183 kind="COLUMNS", 184 ) 185 186 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]: 187 if len(aggregations) == 1: 188 return [""] 189 return pivot_column_names(aggregations, dialect="spark")
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer
and produces
a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: If the table alias comes after tablesample. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
Inherited Members
191 class Generator(Hive.Generator): 192 TYPE_MAPPING = { 193 **Hive.Generator.TYPE_MAPPING, 194 exp.DataType.Type.TINYINT: "BYTE", 195 exp.DataType.Type.SMALLINT: "SHORT", 196 exp.DataType.Type.BIGINT: "LONG", 197 } 198 199 PROPERTIES_LOCATION = { 200 **Hive.Generator.PROPERTIES_LOCATION, 201 exp.EngineProperty: exp.Properties.Location.UNSUPPORTED, 202 exp.AutoIncrementProperty: exp.Properties.Location.UNSUPPORTED, 203 exp.CharacterSetProperty: exp.Properties.Location.UNSUPPORTED, 204 exp.CollateProperty: exp.Properties.Location.UNSUPPORTED, 205 } 206 207 TRANSFORMS = { 208 **Hive.Generator.TRANSFORMS, 209 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 210 exp.ArraySum: lambda self, e: f"AGGREGATE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)", 211 exp.AtTimeZone: lambda self, e: f"FROM_UTC_TIMESTAMP({self.sql(e, 'this')}, {self.sql(e, 'zone')})", 212 exp.BitwiseLeftShift: rename_func("SHIFTLEFT"), 213 exp.BitwiseRightShift: rename_func("SHIFTRIGHT"), 214 exp.Create: _create_sql, 215 exp.DateFromParts: rename_func("MAKE_DATE"), 216 exp.DateTrunc: lambda self, e: self.func("TRUNC", e.this, e.args.get("unit")), 217 exp.DayOfMonth: rename_func("DAYOFMONTH"), 218 exp.DayOfWeek: rename_func("DAYOFWEEK"), 219 exp.DayOfYear: rename_func("DAYOFYEAR"), 220 exp.FileFormatProperty: lambda self, e: f"USING {e.name.upper()}", 221 exp.From: transforms.preprocess([_unalias_pivot]), 222 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */", 223 exp.LogicalAnd: rename_func("BOOL_AND"), 224 exp.LogicalOr: rename_func("BOOL_OR"), 225 exp.Map: _map_sql, 226 exp.Pivot: transforms.preprocess([_unqualify_pivot_columns]), 227 exp.Reduce: rename_func("AGGREGATE"), 228 exp.StrToDate: _str_to_date, 229 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", 230 exp.TimestampTrunc: lambda self, e: self.func( 231 "DATE_TRUNC", exp.Literal.string(e.text("unit")), e.this 232 ), 233 exp.Trim: trim_sql, 234 exp.UnixToTime: 
_unix_to_time_sql, 235 exp.VariancePop: rename_func("VAR_POP"), 236 exp.WeekOfYear: rename_func("WEEKOFYEAR"), 237 exp.WithinGroup: transforms.preprocess( 238 [transforms.remove_within_group_for_percentiles] 239 ), 240 } 241 TRANSFORMS.pop(exp.ArrayJoin) 242 TRANSFORMS.pop(exp.ArraySort) 243 TRANSFORMS.pop(exp.ILike) 244 245 WRAP_DERIVED_VALUES = False 246 CREATE_FUNCTION_RETURN_AS = False 247 248 def cast_sql(self, expression: exp.Cast) -> str: 249 if isinstance(expression.this, exp.Cast) and expression.this.is_type( 250 exp.DataType.Type.JSON 251 ): 252 schema = f"'{self.sql(expression, 'to')}'" 253 return self.func("FROM_JSON", expression.this.this, schema) 254 if expression.to.is_type(exp.DataType.Type.JSON): 255 return self.func("TO_JSON", expression.this) 256 257 return super(Hive.Generator, self).cast_sql(expression) 258 259 def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str: 260 return super().columndef_sql( 261 expression, 262 sep=": " 263 if isinstance(expression.parent, exp.DataType) 264 and expression.parent.is_type(exp.DataType.Type.STRUCT) 265 else sep, 266 )
Generator interprets the given syntax tree and produces a SQL string as an output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- normalize (bool): if set to True all identifiers will be lower cased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
- alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
248 def cast_sql(self, expression: exp.Cast) -> str: 249 if isinstance(expression.this, exp.Cast) and expression.this.is_type( 250 exp.DataType.Type.JSON 251 ): 252 schema = f"'{self.sql(expression, 'to')}'" 253 return self.func("FROM_JSON", expression.this.this, schema) 254 if expression.to.is_type(exp.DataType.Type.JSON): 255 return self.func("TO_JSON", expression.this) 256 257 return super(Hive.Generator, self).cast_sql(expression)
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypesize_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql