sqlglot.dialects.hive
"""Hive dialect for sqlglot.

Defines the tokenizer, parser and generator rules that translate between
Hive SQL and sqlglot's dialect-agnostic expression tree.
"""

from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    approx_count_distinct_sql,
    create_with_partitions_sql,
    format_time_lambda,
    if_sql,
    locate_to_strposition,
    min_or_least,
    no_ilike_sql,
    no_recursive_cte_sql,
    no_safe_divide_sql,
    no_trycast_sql,
    rename_func,
    strposition_to_locate_sql,
    struct_extract_sql,
    timestrtotime_sql,
    var_map_sql,
)
from sqlglot.helper import seq_get
from sqlglot.parser import parse_var_map
from sqlglot.tokens import TokenType

# Maps a DATE_ADD/DATE_DIFF unit to the Hive function that implements it and
# the multiplier needed to express the unit in that function's granularity.
# (FuncType, Multiplier)
DATE_DELTA_INTERVAL = {
    "YEAR": ("ADD_MONTHS", 12),
    "MONTH": ("ADD_MONTHS", 1),
    "QUARTER": ("ADD_MONTHS", 3),
    "WEEK": ("DATE_ADD", 7),
    "DAY": ("DATE_ADD", 1),
}

# Units whose diffs are computed via MONTHS_BETWEEN instead of DATEDIFF.
DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")


def _add_date_sql(self: generator.Generator, expression: exp.DateAdd) -> str:
    """Render DATE_ADD as the appropriate Hive function for the unit.

    A literal increment is scaled by the unit's multiplier (e.g. 2 WEEK ->
    DATE_ADD(..., 14)); non-literal increments are passed through unscaled.
    """
    unit = expression.text("unit").upper()
    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))
    modified_increment = (
        int(expression.text("expression")) * multiplier
        if expression.expression.is_number
        else expression.expression
    )
    modified_increment = exp.Literal.number(modified_increment)
    # NOTE(review): `.this` passes the literal's inner value rather than the
    # Literal node itself — presumably intentional for self.func's formatting;
    # confirm against other dialects before changing.
    return self.func(func, expression.this, modified_increment.this)


def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str:
    """Render DATE_DIFF as MONTHS_BETWEEN (month-like units) or DATEDIFF,
    dividing by the unit multiplier when it is greater than one."""
    unit = expression.text("unit").upper()
    sql_func = "MONTHS_BETWEEN" if unit in DIFF_MONTH_SWITCH else "DATEDIFF"
    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"
    return f"{diff_sql}{multiplier_sql}"


def _array_sort(self: generator.Generator, expression: exp.ArraySort) -> str:
    """Render ARRAY_SORT as SORT_ARRAY; Hive cannot take a custom comparator."""
    if expression.expression:
        self.unsupported("Hive SORT_ARRAY does not support a comparator")
    return f"SORT_ARRAY({self.sql(expression, 'this')})"


def _property_sql(self: generator.Generator, expression: exp.Property) -> str:
    """Render a table property as 'key'=value (Hive quotes the key only)."""
    return f"'{expression.name}'={self.sql(expression, 'value')}"


def _str_to_unix(self: generator.Generator, expression: exp.StrToUnix) -> str:
    """Render STR_TO_UNIX as UNIX_TIMESTAMP, omitting the format when it is
    Hive's default."""
    return self.func("UNIX_TIMESTAMP", expression.this, _time_format(self, expression))


def _str_to_date(self: generator.Generator, expression: exp.StrToDate) -> str:
    """Render STR_TO_DATE, round-tripping through UNIX_TIMESTAMP only for
    non-default formats, then casting to DATE."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.time_format, Hive.date_format):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS DATE)"


def _str_to_time(self: generator.Generator, expression: exp.StrToTime) -> str:
    """Render STR_TO_TIME like `_str_to_date`, but casting to TIMESTAMP."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.time_format, Hive.date_format):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS TIMESTAMP)"


def _time_format(
    self: generator.Generator, expression: exp.UnixToStr | exp.StrToUnix
) -> t.Optional[str]:
    """Return the expression's time format, or None when it equals Hive's
    default (so the generated call can omit the argument)."""
    time_format = self.format_time(expression)
    if time_format == Hive.time_format:
        return None
    return time_format


def _time_to_str(self: generator.Generator, expression: exp.TimeToStr) -> str:
    """Render TIME_TO_STR as Hive's DATE_FORMAT."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    return f"DATE_FORMAT({this}, {time_format})"


def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
    """Render TS_OR_DS_TO_DATE as TO_DATE, including the format argument only
    when it differs from Hive's defaults."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format and time_format not in (Hive.time_format, Hive.date_format):
        return f"TO_DATE({this}, {time_format})"
    return f"TO_DATE({this})"


def _unnest_to_explode_sql(self: generator.Generator, expression: exp.Join) -> str:
    """Rewrite a join over UNNEST into Hive's LATERAL VIEW EXPLODE/POSEXPLODE.

    Non-unnest joins fall through to the default join renderer.
    """
    unnest = expression.this
    if isinstance(unnest, exp.Unnest):
        alias = unnest.args.get("alias")
        udtf = exp.Posexplode if unnest.args.get("ordinality") else exp.Explode
        return "".join(
            self.sql(
                exp.Lateral(
                    this=udtf(this=expression),
                    view=True,
                    alias=exp.TableAlias(this=alias.this, columns=[column]),  # type: ignore
                )
            )
            # `expression` here deliberately shadows the parameter: each
            # unnested expression is paired with its alias column.
            for expression, column in zip(unnest.expressions, alias.columns if alias else [])
        )
    return self.join_sql(expression)


def _index_sql(self: generator.Generator, expression: exp.Index) -> str:
    """Render CREATE INDEX's body using Hive's `ON TABLE` syntax."""
    this = self.sql(expression, "this")
    table = self.sql(expression, "table")
    columns = self.sql(expression, "columns")
    return f"{this} ON TABLE {table} {columns}"


class Hive(Dialect):
    """The Apache Hive SQL dialect."""

    alias_post_tablesample = True

    # Java SimpleDateFormat tokens (Hive's format language) -> Python strftime.
    time_mapping = {
        "y": "%Y",
        "Y": "%Y",
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "hh": "%I",
        "h": "%-I",
        "mm": "%M",
        "m": "%-M",
        "ss": "%S",
        "s": "%-S",
        "SSSSSS": "%f",
        "a": "%p",
        "DD": "%j",
        "D": "%-j",
        "E": "%a",
        "EE": "%a",
        "EEE": "%a",
        "EEEE": "%A",
    }

    date_format = "'yyyy-MM-dd'"
    dateint_format = "'yyyyMMdd'"
    time_format = "'yyyy-MM-dd HH:mm:ss'"

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"']
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]
        ENCODE = "utf-8"

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ADD ARCHIVE": TokenType.COMMAND,
            "ADD ARCHIVES": TokenType.COMMAND,
            "ADD FILE": TokenType.COMMAND,
            "ADD FILES": TokenType.COMMAND,
            "ADD JAR": TokenType.COMMAND,
            "ADD JARS": TokenType.COMMAND,
            "MSCK REPAIR": TokenType.COMMAND,
            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
        }

        # Numeric literal suffixes, e.g. 10L -> BIGINT.
        NUMERIC_LITERALS = {
            "L": "BIGINT",
            "S": "SMALLINT",
            "Y": "TINYINT",
            "D": "DOUBLE",
            "F": "FLOAT",
            "BD": "DECIMAL",
        }

        IDENTIFIER_CAN_START_WITH_DIGIT = True

    class Parser(parser.Parser):
        STRICT_CAST = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,  # type: ignore
            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
            "DATE_ADD": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.Literal.string("DAY"),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
            ),
            # DATE_SUB is modeled as DATE_ADD with a negated increment.
            "DATE_SUB": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=exp.Mul(
                    this=seq_get(args, 1),
                    expression=exp.Literal.number(-1),
                ),
                unit=exp.Literal.string("DAY"),
            ),
            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
                [
                    exp.TimeStrToTime(this=seq_get(args, 0)),
                    seq_get(args, 1),
                ]
            ),
            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
            "LOCATE": locate_to_strposition,
            # Single-argument LOG is the natural log in Hive.
            "LOG": (
                lambda args: exp.Log.from_arg_list(args)
                if len(args) > 1
                else exp.Ln.from_arg_list(args)
            ),
            "MAP": parse_var_map,
            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
            "PERCENTILE": exp.Quantile.from_arg_list,
            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
            "COLLECT_SET": exp.SetAgg.from_arg_list,
            "SIZE": exp.ArraySize.from_arg_list,
            "SPLIT": exp.RegexpSplit.from_arg_list,
            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
                expressions=self._parse_wrapped_csv(self._parse_property)
            ),
        }

    class Generator(generator.Generator):
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "BINARY",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,  # type: ignore
            **transforms.UNALIAS_GROUP,  # type: ignore
            **transforms.ELIMINATE_QUALIFY,  # type: ignore
            exp.Property: _property_sql,
            exp.ApproxDistinct: approx_count_distinct_sql,
            exp.ArrayConcat: rename_func("CONCAT"),
            exp.ArraySize: rename_func("SIZE"),
            exp.ArraySort: _array_sort,
            exp.With: no_recursive_cte_sql,
            exp.DateAdd: _add_date_sql,
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: rename_func("TO_DATE"),
            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
            exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}",
            exp.If: if_sql,
            exp.Index: _index_sql,
            exp.ILike: no_ilike_sql,
            exp.Join: _unnest_to_explode_sql,
            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
            exp.Map: var_map_sql,
            exp.Min: min_or_least,
            exp.VarMap: var_map_sql,
            exp.Create: create_with_partitions_sql,
            exp.Quantile: rename_func("PERCENTILE"),
            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
            exp.RegexpSplit: rename_func("SPLIT"),
            exp.SafeDivide: no_safe_divide_sql,
            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
            exp.SetAgg: rename_func("COLLECT_SET"),
            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
            exp.StrPosition: strposition_to_locate_sql,
            exp.StrToDate: _str_to_date,
            exp.StrToTime: _str_to_time,
            exp.StrToUnix: _str_to_unix,
            exp.StructExtract: struct_extract_sql,
            exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}",
            exp.TimeStrToDate: rename_func("TO_DATE"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.TimeToStr: _time_to_str,
            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
            exp.TsOrDsToDate: _to_date_sql,
            exp.TryCast: no_trycast_sql,
            exp.UnixToStr: lambda self, e: self.func(
                "FROM_UNIXTIME", e.this, _time_format(self, e)
            ),
            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
            exp.LastDateOfMonth: rename_func("LAST_DAY"),
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA,
        }

        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
            """Render ARRAY_AGG as COLLECT_LIST, dropping any ORDER BY since
            Hive's COLLECT_LIST does not support ordering."""
            return self.func(
                "COLLECT_LIST",
                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
            )

        def with_properties(self, properties: exp.Properties) -> str:
            """Render trailing table properties under a TBLPROPERTIES clause."""
            return self.properties(
                properties,
                prefix=self.seg("TBLPROPERTIES"),
            )

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Map unsized VARCHAR/NVARCHAR to STRING and strip parameters from
            temporal types (Hive's types are not parameterized)."""
            if (
                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
                and not expression.expressions
            ):
                expression = exp.DataType.build("text")
            elif expression.this in exp.DataType.TEMPORAL_TYPES:
                expression = exp.DataType.build(expression.this)
            return super().datatype_sql(expression)
140class Hive(Dialect): 141 alias_post_tablesample = True 142 143 time_mapping = { 144 "y": "%Y", 145 "Y": "%Y", 146 "YYYY": "%Y", 147 "yyyy": "%Y", 148 "YY": "%y", 149 "yy": "%y", 150 "MMMM": "%B", 151 "MMM": "%b", 152 "MM": "%m", 153 "M": "%-m", 154 "dd": "%d", 155 "d": "%-d", 156 "HH": "%H", 157 "H": "%-H", 158 "hh": "%I", 159 "h": "%-I", 160 "mm": "%M", 161 "m": "%-M", 162 "ss": "%S", 163 "s": "%-S", 164 "SSSSSS": "%f", 165 "a": "%p", 166 "DD": "%j", 167 "D": "%-j", 168 "E": "%a", 169 "EE": "%a", 170 "EEE": "%a", 171 "EEEE": "%A", 172 } 173 174 date_format = "'yyyy-MM-dd'" 175 dateint_format = "'yyyyMMdd'" 176 time_format = "'yyyy-MM-dd HH:mm:ss'" 177 178 class Tokenizer(tokens.Tokenizer): 179 QUOTES = ["'", '"'] 180 IDENTIFIERS = ["`"] 181 STRING_ESCAPES = ["\\"] 182 ENCODE = "utf-8" 183 184 KEYWORDS = { 185 **tokens.Tokenizer.KEYWORDS, 186 "ADD ARCHIVE": TokenType.COMMAND, 187 "ADD ARCHIVES": TokenType.COMMAND, 188 "ADD FILE": TokenType.COMMAND, 189 "ADD FILES": TokenType.COMMAND, 190 "ADD JAR": TokenType.COMMAND, 191 "ADD JARS": TokenType.COMMAND, 192 "MSCK REPAIR": TokenType.COMMAND, 193 "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES, 194 } 195 196 NUMERIC_LITERALS = { 197 "L": "BIGINT", 198 "S": "SMALLINT", 199 "Y": "TINYINT", 200 "D": "DOUBLE", 201 "F": "FLOAT", 202 "BD": "DECIMAL", 203 } 204 205 IDENTIFIER_CAN_START_WITH_DIGIT = True 206 207 class Parser(parser.Parser): 208 STRICT_CAST = False 209 210 FUNCTIONS = { 211 **parser.Parser.FUNCTIONS, # type: ignore 212 "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list, 213 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 214 "DATE_ADD": lambda args: exp.TsOrDsAdd( 215 this=seq_get(args, 0), 216 expression=seq_get(args, 1), 217 unit=exp.Literal.string("DAY"), 218 ), 219 "DATEDIFF": lambda args: exp.DateDiff( 220 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 221 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), 222 ), 223 "DATE_SUB": lambda args: exp.TsOrDsAdd( 224 this=seq_get(args, 0), 225 
expression=exp.Mul( 226 this=seq_get(args, 1), 227 expression=exp.Literal.number(-1), 228 ), 229 unit=exp.Literal.string("DAY"), 230 ), 231 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( 232 [ 233 exp.TimeStrToTime(this=seq_get(args, 0)), 234 seq_get(args, 1), 235 ] 236 ), 237 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), 238 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), 239 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, 240 "LOCATE": locate_to_strposition, 241 "LOG": ( 242 lambda args: exp.Log.from_arg_list(args) 243 if len(args) > 1 244 else exp.Ln.from_arg_list(args) 245 ), 246 "MAP": parse_var_map, 247 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), 248 "PERCENTILE": exp.Quantile.from_arg_list, 249 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, 250 "COLLECT_SET": exp.SetAgg.from_arg_list, 251 "SIZE": exp.ArraySize.from_arg_list, 252 "SPLIT": exp.RegexpSplit.from_arg_list, 253 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), 254 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), 255 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), 256 } 257 258 PROPERTY_PARSERS = { 259 **parser.Parser.PROPERTY_PARSERS, # type: ignore 260 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( 261 expressions=self._parse_wrapped_csv(self._parse_property) 262 ), 263 } 264 265 class Generator(generator.Generator): 266 TYPE_MAPPING = { 267 **generator.Generator.TYPE_MAPPING, # type: ignore 268 exp.DataType.Type.TEXT: "STRING", 269 exp.DataType.Type.DATETIME: "TIMESTAMP", 270 exp.DataType.Type.VARBINARY: "BINARY", 271 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 272 } 273 274 TRANSFORMS = { 275 **generator.Generator.TRANSFORMS, # type: ignore 276 **transforms.UNALIAS_GROUP, # type: ignore 277 **transforms.ELIMINATE_QUALIFY, # type: ignore 278 exp.Property: _property_sql, 279 exp.ApproxDistinct: 
approx_count_distinct_sql, 280 exp.ArrayConcat: rename_func("CONCAT"), 281 exp.ArraySize: rename_func("SIZE"), 282 exp.ArraySort: _array_sort, 283 exp.With: no_recursive_cte_sql, 284 exp.DateAdd: _add_date_sql, 285 exp.DateDiff: _date_diff_sql, 286 exp.DateStrToDate: rename_func("TO_DATE"), 287 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", 288 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", 289 exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}", 290 exp.If: if_sql, 291 exp.Index: _index_sql, 292 exp.ILike: no_ilike_sql, 293 exp.Join: _unnest_to_explode_sql, 294 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), 295 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), 296 exp.Map: var_map_sql, 297 exp.Min: min_or_least, 298 exp.VarMap: var_map_sql, 299 exp.Create: create_with_partitions_sql, 300 exp.Quantile: rename_func("PERCENTILE"), 301 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), 302 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), 303 exp.RegexpSplit: rename_func("SPLIT"), 304 exp.SafeDivide: no_safe_divide_sql, 305 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), 306 exp.SetAgg: rename_func("COLLECT_SET"), 307 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", 308 exp.StrPosition: strposition_to_locate_sql, 309 exp.StrToDate: _str_to_date, 310 exp.StrToTime: _str_to_time, 311 exp.StrToUnix: _str_to_unix, 312 exp.StructExtract: struct_extract_sql, 313 exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}", 314 exp.TimeStrToDate: rename_func("TO_DATE"), 315 exp.TimeStrToTime: timestrtotime_sql, 316 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), 317 exp.TimeToStr: _time_to_str, 318 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), 319 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), 
'-', ''), 1, 8) AS INT)", 320 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", 321 exp.TsOrDsToDate: _to_date_sql, 322 exp.TryCast: no_trycast_sql, 323 exp.UnixToStr: lambda self, e: self.func( 324 "FROM_UNIXTIME", e.this, _time_format(self, e) 325 ), 326 exp.UnixToTime: rename_func("FROM_UNIXTIME"), 327 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), 328 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", 329 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", 330 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), 331 exp.NumberToStr: rename_func("FORMAT_NUMBER"), 332 exp.LastDateOfMonth: rename_func("LAST_DAY"), 333 } 334 335 PROPERTIES_LOCATION = { 336 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 337 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, 338 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 339 exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA, 340 } 341 342 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: 343 return self.func( 344 "COLLECT_LIST", 345 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, 346 ) 347 348 def with_properties(self, properties: exp.Properties) -> str: 349 return self.properties( 350 properties, 351 prefix=self.seg("TBLPROPERTIES"), 352 ) 353 354 def datatype_sql(self, expression: exp.DataType) -> str: 355 if ( 356 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) 357 and not expression.expressions 358 ): 359 expression = exp.DataType.build("text") 360 elif expression.this in exp.DataType.TEMPORAL_TYPES: 361 expression = exp.DataType.build(expression.this) 362 return super().datatype_sql(expression)
178 class Tokenizer(tokens.Tokenizer): 179 QUOTES = ["'", '"'] 180 IDENTIFIERS = ["`"] 181 STRING_ESCAPES = ["\\"] 182 ENCODE = "utf-8" 183 184 KEYWORDS = { 185 **tokens.Tokenizer.KEYWORDS, 186 "ADD ARCHIVE": TokenType.COMMAND, 187 "ADD ARCHIVES": TokenType.COMMAND, 188 "ADD FILE": TokenType.COMMAND, 189 "ADD FILES": TokenType.COMMAND, 190 "ADD JAR": TokenType.COMMAND, 191 "ADD JARS": TokenType.COMMAND, 192 "MSCK REPAIR": TokenType.COMMAND, 193 "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES, 194 } 195 196 NUMERIC_LITERALS = { 197 "L": "BIGINT", 198 "S": "SMALLINT", 199 "Y": "TINYINT", 200 "D": "DOUBLE", 201 "F": "FLOAT", 202 "BD": "DECIMAL", 203 } 204 205 IDENTIFIER_CAN_START_WITH_DIGIT = True
Inherited Members
207 class Parser(parser.Parser): 208 STRICT_CAST = False 209 210 FUNCTIONS = { 211 **parser.Parser.FUNCTIONS, # type: ignore 212 "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list, 213 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 214 "DATE_ADD": lambda args: exp.TsOrDsAdd( 215 this=seq_get(args, 0), 216 expression=seq_get(args, 1), 217 unit=exp.Literal.string("DAY"), 218 ), 219 "DATEDIFF": lambda args: exp.DateDiff( 220 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 221 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), 222 ), 223 "DATE_SUB": lambda args: exp.TsOrDsAdd( 224 this=seq_get(args, 0), 225 expression=exp.Mul( 226 this=seq_get(args, 1), 227 expression=exp.Literal.number(-1), 228 ), 229 unit=exp.Literal.string("DAY"), 230 ), 231 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( 232 [ 233 exp.TimeStrToTime(this=seq_get(args, 0)), 234 seq_get(args, 1), 235 ] 236 ), 237 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), 238 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), 239 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, 240 "LOCATE": locate_to_strposition, 241 "LOG": ( 242 lambda args: exp.Log.from_arg_list(args) 243 if len(args) > 1 244 else exp.Ln.from_arg_list(args) 245 ), 246 "MAP": parse_var_map, 247 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), 248 "PERCENTILE": exp.Quantile.from_arg_list, 249 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, 250 "COLLECT_SET": exp.SetAgg.from_arg_list, 251 "SIZE": exp.ArraySize.from_arg_list, 252 "SPLIT": exp.RegexpSplit.from_arg_list, 253 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), 254 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), 255 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), 256 } 257 258 PROPERTY_PARSERS = { 259 **parser.Parser.PROPERTY_PARSERS, # type: ignore 260 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( 261 
expressions=self._parse_wrapped_csv(self._parse_property) 262 ), 263 }
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: If the table alias comes after tablesample. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
Inherited Members
265 class Generator(generator.Generator): 266 TYPE_MAPPING = { 267 **generator.Generator.TYPE_MAPPING, # type: ignore 268 exp.DataType.Type.TEXT: "STRING", 269 exp.DataType.Type.DATETIME: "TIMESTAMP", 270 exp.DataType.Type.VARBINARY: "BINARY", 271 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 272 } 273 274 TRANSFORMS = { 275 **generator.Generator.TRANSFORMS, # type: ignore 276 **transforms.UNALIAS_GROUP, # type: ignore 277 **transforms.ELIMINATE_QUALIFY, # type: ignore 278 exp.Property: _property_sql, 279 exp.ApproxDistinct: approx_count_distinct_sql, 280 exp.ArrayConcat: rename_func("CONCAT"), 281 exp.ArraySize: rename_func("SIZE"), 282 exp.ArraySort: _array_sort, 283 exp.With: no_recursive_cte_sql, 284 exp.DateAdd: _add_date_sql, 285 exp.DateDiff: _date_diff_sql, 286 exp.DateStrToDate: rename_func("TO_DATE"), 287 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", 288 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", 289 exp.FileFormatProperty: lambda self, e: f"STORED AS {e.name.upper()}", 290 exp.If: if_sql, 291 exp.Index: _index_sql, 292 exp.ILike: no_ilike_sql, 293 exp.Join: _unnest_to_explode_sql, 294 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), 295 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), 296 exp.Map: var_map_sql, 297 exp.Min: min_or_least, 298 exp.VarMap: var_map_sql, 299 exp.Create: create_with_partitions_sql, 300 exp.Quantile: rename_func("PERCENTILE"), 301 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), 302 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), 303 exp.RegexpSplit: rename_func("SPLIT"), 304 exp.SafeDivide: no_safe_divide_sql, 305 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), 306 exp.SetAgg: rename_func("COLLECT_SET"), 307 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", 308 exp.StrPosition: strposition_to_locate_sql, 309 
exp.StrToDate: _str_to_date, 310 exp.StrToTime: _str_to_time, 311 exp.StrToUnix: _str_to_unix, 312 exp.StructExtract: struct_extract_sql, 313 exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}", 314 exp.TimeStrToDate: rename_func("TO_DATE"), 315 exp.TimeStrToTime: timestrtotime_sql, 316 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), 317 exp.TimeToStr: _time_to_str, 318 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), 319 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", 320 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", 321 exp.TsOrDsToDate: _to_date_sql, 322 exp.TryCast: no_trycast_sql, 323 exp.UnixToStr: lambda self, e: self.func( 324 "FROM_UNIXTIME", e.this, _time_format(self, e) 325 ), 326 exp.UnixToTime: rename_func("FROM_UNIXTIME"), 327 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), 328 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", 329 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", 330 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), 331 exp.NumberToStr: rename_func("FORMAT_NUMBER"), 332 exp.LastDateOfMonth: rename_func("LAST_DAY"), 333 } 334 335 PROPERTIES_LOCATION = { 336 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 337 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, 338 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 339 exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA, 340 } 341 342 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: 343 return self.func( 344 "COLLECT_LIST", 345 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, 346 ) 347 348 def with_properties(self, properties: exp.Properties) -> str: 349 return self.properties( 350 properties, 351 prefix=self.seg("TBLPROPERTIES"), 352 ) 353 354 def 
datatype_sql(self, expression: exp.DataType) -> str: 355 if ( 356 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) 357 and not expression.expressions 358 ): 359 expression = exp.DataType.build("text") 360 elif expression.this in exp.DataType.TEMPORAL_TYPES: 361 expression = exp.DataType.build(expression.this) 362 return super().datatype_sql(expression)
Generator interprets the given syntax tree and produces a SQL string as an output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- normalize (bool): if set to True all identifiers will be lower cased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
- alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
354 def datatype_sql(self, expression: exp.DataType) -> str: 355 if ( 356 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) 357 and not expression.expressions 358 ): 359 expression = exp.DataType.build("text") 360 elif expression.this in exp.DataType.TEMPORAL_TYPES: 361 expression = exp.DataType.build(expression.this) 362 return super().datatype_sql(expression)
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- afterjournalproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- select_sql
- schema_sql
- star_sql
- structkwarg_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- window_spec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- is_sql
- like_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql