sqlglot.dialects.hive
"""Hive dialect definition: tokenizer, parser and SQL-generator overrides."""

from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    approx_count_distinct_sql,
    create_with_partitions_sql,
    format_time_lambda,
    if_sql,
    locate_to_strposition,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    no_recursive_cte_sql,
    no_safe_divide_sql,
    no_trycast_sql,
    rename_func,
    strposition_to_locate_sql,
    struct_extract_sql,
    timestrtotime_sql,
    var_map_sql,
)
from sqlglot.helper import seq_get
from sqlglot.parser import parse_var_map
from sqlglot.tokens import TokenType

# Maps a date-delta unit to (FuncType, Multiplier): the Hive function that
# implements the delta and the factor to scale the increment by (e.g. one
# YEAR is ADD_MONTHS with the increment multiplied by 12).
DATE_DELTA_INTERVAL = {
    "YEAR": ("ADD_MONTHS", 12),
    "MONTH": ("ADD_MONTHS", 1),
    "QUARTER": ("ADD_MONTHS", 3),
    "WEEK": ("DATE_ADD", 7),
    "DAY": ("DATE_ADD", 1),
}

# Sub-day diff units expressed as a factor applied to a seconds difference
# (empty string means "seconds as-is", i.e. no scaling suffix is emitted).
TIME_DIFF_FACTOR = {
    "MILLISECOND": " * 1000",
    "SECOND": "",
    "MINUTE": " / 60",
    "HOUR": " / 3600",
}

# Units whose difference is computed via MONTHS_BETWEEN instead of DATEDIFF.
DIFF_MONTH_SWITCH = ("YEAR", "QUARTER", "MONTH")


def _add_date_sql(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:
    """Render DateAdd/DateSub as Hive's ADD_MONTHS or DATE_ADD.

    The unit picks the target function and a multiplier from
    DATE_DELTA_INTERVAL; DateSub negates the multiplier. Numeric increments
    are folded into a single literal, otherwise a `* multiplier` expression
    is emitted.
    """
    unit = expression.text("unit").upper()
    func, multiplier = DATE_DELTA_INTERVAL.get(unit, ("DATE_ADD", 1))

    if isinstance(expression, exp.DateSub):
        multiplier *= -1

    if expression.expression.is_number:
        modified_increment = exp.Literal.number(int(expression.text("expression")) * multiplier)
    else:
        modified_increment = expression.expression
        if multiplier != 1:
            modified_increment = exp.Mul(  # type: ignore
                this=modified_increment, expression=exp.Literal.number(multiplier)
            )

    return self.func(func, expression.this, modified_increment)


def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str:
    """Render DateDiff for Hive.

    Sub-day units are computed as a UNIX_TIMESTAMP difference scaled by
    TIME_DIFF_FACTOR; month-like units use MONTHS_BETWEEN (divided by the
    unit's month multiplier); everything else falls back to DATEDIFF.
    """
    unit = expression.text("unit").upper()

    factor = TIME_DIFF_FACTOR.get(unit)
    if factor is not None:
        left = self.sql(expression, "this")
        right = self.sql(expression, "expression")
        sec_diff = f"UNIX_TIMESTAMP({left}) - UNIX_TIMESTAMP({right})"
        # Only parenthesize when a scaling suffix is appended.
        return f"({sec_diff}){factor}" if factor else sec_diff

    sql_func = "MONTHS_BETWEEN" if unit in DIFF_MONTH_SWITCH else "DATEDIFF"
    _, multiplier = DATE_DELTA_INTERVAL.get(unit, ("", 1))
    multiplier_sql = f" / {multiplier}" if multiplier > 1 else ""
    diff_sql = f"{sql_func}({self.format_args(expression.this, expression.expression)})"

    return f"{diff_sql}{multiplier_sql}"


def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str:
    """Render JSONFormat as TO_JSON, but pass JSON-typed input through as-is."""
    this = expression.this

    if not this.type:
        # Lazily annotate types so we can detect an already-JSON argument.
        from sqlglot.optimizer.annotate_types import annotate_types

        annotate_types(this)

    if this.type.is_type(exp.DataType.Type.JSON):
        return self.sql(this)
    return self.func("TO_JSON", this, expression.args.get("options"))


def _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:
    """Render ArraySort as SORT_ARRAY; Hive cannot take a custom comparator."""
    if expression.expression:
        self.unsupported("Hive SORT_ARRAY does not support a comparator")
    return f"SORT_ARRAY({self.sql(expression, 'this')})"


def _property_sql(self: generator.Generator, expression: exp.Property) -> str:
    """Render a table property as 'name'=value (Hive quotes property names)."""
    return f"'{expression.name}'={self.sql(expression, 'value')}"


def _str_to_unix_sql(self: generator.Generator, expression: exp.StrToUnix) -> str:
    """Render StrToUnix as UNIX_TIMESTAMP, omitting the default time format."""
    return self.func("UNIX_TIMESTAMP", expression.this, _time_format(self, expression))


def _str_to_date_sql(self: generator.Generator, expression: exp.StrToDate) -> str:
    """Render StrToDate as CAST(... AS DATE), round-tripping through
    UNIX_TIMESTAMP when the format is not one Hive parses natively."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.time_format, Hive.date_format):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS DATE)"


def _str_to_time_sql(self: generator.Generator, expression: exp.StrToTime) -> str:
    """Render StrToTime as CAST(... AS TIMESTAMP), round-tripping through
    UNIX_TIMESTAMP when the format is not one Hive parses natively."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format not in (Hive.time_format, Hive.date_format):
        this = f"FROM_UNIXTIME(UNIX_TIMESTAMP({this}, {time_format}))"
    return f"CAST({this} AS TIMESTAMP)"


def _time_format(
    self: generator.Generator, expression: exp.UnixToStr | exp.StrToUnix
) -> t.Optional[str]:
    """Return the expression's time format, or None when it matches Hive's
    default so the format argument can be omitted entirely."""
    time_format = self.format_time(expression)
    if time_format == Hive.time_format:
        return None
    return time_format


def _time_to_str(self: generator.Generator, expression: exp.TimeToStr) -> str:
    """Render TimeToStr as DATE_FORMAT with the mapped format string."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    return f"DATE_FORMAT({this}, {time_format})"


def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
    """Render TsOrDsToDate as TO_DATE, passing the format only when it is
    not a default Hive date/time format."""
    this = self.sql(expression, "this")
    time_format = self.format_time(expression)
    if time_format and time_format not in (Hive.time_format, Hive.date_format):
        return f"TO_DATE({this}, {time_format})"
    return f"TO_DATE({this})"


def _index_sql(self: generator.Generator, expression: exp.Index) -> str:
    """Render an index definition using Hive's `ON TABLE` syntax."""
    this = self.sql(expression, "this")
    table = self.sql(expression, "table")
    columns = self.sql(expression, "columns")
    return f"{this} ON TABLE {table} {columns}"


class Hive(Dialect):
    """The Apache Hive SQL dialect."""

    alias_post_tablesample = True

    # Mapping of Hive (Java SimpleDateFormat-style) time tokens to Python
    # strftime directives used by format_time_lambda / format_time.
    time_mapping = {
        "y": "%Y",
        "Y": "%Y",
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "MM": "%m",
        "M": "%-m",
        "dd": "%d",
        "d": "%-d",
        "HH": "%H",
        "H": "%-H",
        "hh": "%I",
        "h": "%-I",
        "mm": "%M",
        "m": "%-M",
        "ss": "%S",
        "s": "%-S",
        "SSSSSS": "%f",
        "a": "%p",
        "DD": "%j",
        "D": "%-j",
        "E": "%a",
        "EE": "%a",
        "EEE": "%a",
        "EEEE": "%A",
    }

    # Default formats, kept as quoted SQL literals.
    date_format = "'yyyy-MM-dd'"
    dateint_format = "'yyyyMMdd'"
    time_format = "'yyyy-MM-dd HH:mm:ss'"

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"']
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]
        ENCODE = "utf-8"
        IDENTIFIER_CAN_START_WITH_DIGIT = True

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            # Hive resource/maintenance statements are treated as opaque commands.
            "ADD ARCHIVE": TokenType.COMMAND,
            "ADD ARCHIVES": TokenType.COMMAND,
            "ADD FILE": TokenType.COMMAND,
            "ADD FILES": TokenType.COMMAND,
            "ADD JAR": TokenType.COMMAND,
            "ADD JARS": TokenType.COMMAND,
            "MSCK REPAIR": TokenType.COMMAND,
            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
        }

        # Numeric literal suffixes and the types they denote (e.g. 1L -> BIGINT).
        NUMERIC_LITERALS = {
            "L": "BIGINT",
            "S": "SMALLINT",
            "Y": "TINYINT",
            "D": "DOUBLE",
            "F": "FLOAT",
            "BD": "DECIMAL",
        }

    class Parser(parser.Parser):
        LOG_DEFAULTS_TO_LN = True
        STRICT_CAST = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,  # type: ignore
            "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list,
            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
            # Hive's DATE_ADD/DATE_SUB operate in whole days on a ts-or-ds value.
            "DATE_ADD": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.Literal.string("DAY"),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
            ),
            # DATE_SUB is modeled as DATE_ADD with a negated increment.
            "DATE_SUB": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=exp.Mul(
                    this=seq_get(args, 1),
                    expression=exp.Literal.number(-1),
                ),
                unit=exp.Literal.string("DAY"),
            ),
            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
                [
                    exp.TimeStrToTime(this=seq_get(args, 0)),
                    seq_get(args, 1),
                ]
            ),
            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
            "LOCATE": locate_to_strposition,
            "MAP": parse_var_map,
            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
            "PERCENTILE": exp.Quantile.from_arg_list,
            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
            "COLLECT_SET": exp.SetAgg.from_arg_list,
            "SIZE": exp.ArraySize.from_arg_list,
            "SPLIT": exp.RegexpSplit.from_arg_list,
            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
            "TO_JSON": exp.JSONFormat.from_arg_list,
            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
                expressions=self._parse_wrapped_csv(self._parse_property)
            ),
        }

    class Generator(generator.Generator):
        LIMIT_FETCH = "LIMIT"
        TABLESAMPLE_WITH_METHOD = False
        TABLESAMPLE_SIZE_IS_PERCENT = True
        JOIN_HINTS = False
        TABLE_HINTS = False

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "BINARY",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.BIT: "BOOLEAN",
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,  # type: ignore
            exp.Group: transforms.preprocess([transforms.unalias_group]),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_qualify,
                    transforms.eliminate_distinct_on,
                    transforms.unnest_to_explode,
                ]
            ),
            exp.Property: _property_sql,
            exp.ApproxDistinct: approx_count_distinct_sql,
            exp.ArrayConcat: rename_func("CONCAT"),
            exp.ArraySize: rename_func("SIZE"),
            exp.ArraySort: _array_sort_sql,
            exp.With: no_recursive_cte_sql,
            exp.DateAdd: _add_date_sql,
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: rename_func("TO_DATE"),
            exp.DateSub: _add_date_sql,
            exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)",
            exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})",
            exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
            exp.If: if_sql,
            exp.Index: _index_sql,
            exp.ILike: no_ilike_sql,
            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
            exp.JSONFormat: _json_format_sql,
            exp.Map: var_map_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.VarMap: var_map_sql,
            exp.Create: create_with_partitions_sql,
            exp.Quantile: rename_func("PERCENTILE"),
            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
            exp.RegexpSplit: rename_func("SPLIT"),
            exp.SafeDivide: no_safe_divide_sql,
            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
            exp.SetAgg: rename_func("COLLECT_SET"),
            # \Q quotes the delimiter so it is treated literally by SPLIT's regex.
            exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
            exp.StrPosition: strposition_to_locate_sql,
            exp.StrToDate: _str_to_date_sql,
            exp.StrToTime: _str_to_time_sql,
            exp.StrToUnix: _str_to_unix_sql,
            exp.StructExtract: struct_extract_sql,
            exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}",
            exp.TimeStrToDate: rename_func("TO_DATE"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.TimeToStr: _time_to_str,
            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
            exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
            exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
            exp.TsOrDsToDate: _to_date_sql,
            exp.TryCast: no_trycast_sql,
            exp.UnixToStr: lambda self, e: self.func(
                "FROM_UNIXTIME", e.this, _time_format(self, e)
            ),
            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
            exp.LastDateOfMonth: rename_func("LAST_DAY"),
            # Hive has no national-string literal; emit the plain string.
            exp.National: lambda self, e: self.sql(e, "this"),
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
            """Render ArrayAgg as COLLECT_LIST, dropping any ORDER BY wrapper."""
            return self.func(
                "COLLECT_LIST",
                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
            )

        def with_properties(self, properties: exp.Properties) -> str:
            """Render trailing table properties using TBLPROPERTIES."""
            return self.properties(
                properties,
                prefix=self.seg("TBLPROPERTIES"),
            )

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render data types, widening unsized VARCHAR/NVARCHAR to STRING
            and stripping size/precision args from temporal types."""
            if (
                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
                and not expression.expressions
            ):
                expression = exp.DataType.build("text")
            elif expression.this in exp.DataType.TEMPORAL_TYPES:
                expression = exp.DataType.build(expression.this)

            return super().datatype_sql(expression)
158class Hive(Dialect): 159 alias_post_tablesample = True 160 161 time_mapping = { 162 "y": "%Y", 163 "Y": "%Y", 164 "YYYY": "%Y", 165 "yyyy": "%Y", 166 "YY": "%y", 167 "yy": "%y", 168 "MMMM": "%B", 169 "MMM": "%b", 170 "MM": "%m", 171 "M": "%-m", 172 "dd": "%d", 173 "d": "%-d", 174 "HH": "%H", 175 "H": "%-H", 176 "hh": "%I", 177 "h": "%-I", 178 "mm": "%M", 179 "m": "%-M", 180 "ss": "%S", 181 "s": "%-S", 182 "SSSSSS": "%f", 183 "a": "%p", 184 "DD": "%j", 185 "D": "%-j", 186 "E": "%a", 187 "EE": "%a", 188 "EEE": "%a", 189 "EEEE": "%A", 190 } 191 192 date_format = "'yyyy-MM-dd'" 193 dateint_format = "'yyyyMMdd'" 194 time_format = "'yyyy-MM-dd HH:mm:ss'" 195 196 class Tokenizer(tokens.Tokenizer): 197 QUOTES = ["'", '"'] 198 IDENTIFIERS = ["`"] 199 STRING_ESCAPES = ["\\"] 200 ENCODE = "utf-8" 201 IDENTIFIER_CAN_START_WITH_DIGIT = True 202 203 KEYWORDS = { 204 **tokens.Tokenizer.KEYWORDS, 205 "ADD ARCHIVE": TokenType.COMMAND, 206 "ADD ARCHIVES": TokenType.COMMAND, 207 "ADD FILE": TokenType.COMMAND, 208 "ADD FILES": TokenType.COMMAND, 209 "ADD JAR": TokenType.COMMAND, 210 "ADD JARS": TokenType.COMMAND, 211 "MSCK REPAIR": TokenType.COMMAND, 212 "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES, 213 } 214 215 NUMERIC_LITERALS = { 216 "L": "BIGINT", 217 "S": "SMALLINT", 218 "Y": "TINYINT", 219 "D": "DOUBLE", 220 "F": "FLOAT", 221 "BD": "DECIMAL", 222 } 223 224 class Parser(parser.Parser): 225 LOG_DEFAULTS_TO_LN = True 226 STRICT_CAST = False 227 228 FUNCTIONS = { 229 **parser.Parser.FUNCTIONS, # type: ignore 230 "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list, 231 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 232 "DATE_ADD": lambda args: exp.TsOrDsAdd( 233 this=seq_get(args, 0), 234 expression=seq_get(args, 1), 235 unit=exp.Literal.string("DAY"), 236 ), 237 "DATEDIFF": lambda args: exp.DateDiff( 238 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 239 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), 240 ), 241 "DATE_SUB": lambda args: exp.TsOrDsAdd( 242 
this=seq_get(args, 0), 243 expression=exp.Mul( 244 this=seq_get(args, 1), 245 expression=exp.Literal.number(-1), 246 ), 247 unit=exp.Literal.string("DAY"), 248 ), 249 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( 250 [ 251 exp.TimeStrToTime(this=seq_get(args, 0)), 252 seq_get(args, 1), 253 ] 254 ), 255 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), 256 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), 257 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, 258 "LOCATE": locate_to_strposition, 259 "MAP": parse_var_map, 260 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), 261 "PERCENTILE": exp.Quantile.from_arg_list, 262 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, 263 "COLLECT_SET": exp.SetAgg.from_arg_list, 264 "SIZE": exp.ArraySize.from_arg_list, 265 "SPLIT": exp.RegexpSplit.from_arg_list, 266 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), 267 "TO_JSON": exp.JSONFormat.from_arg_list, 268 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), 269 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), 270 } 271 272 PROPERTY_PARSERS = { 273 **parser.Parser.PROPERTY_PARSERS, # type: ignore 274 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( 275 expressions=self._parse_wrapped_csv(self._parse_property) 276 ), 277 } 278 279 class Generator(generator.Generator): 280 LIMIT_FETCH = "LIMIT" 281 TABLESAMPLE_WITH_METHOD = False 282 TABLESAMPLE_SIZE_IS_PERCENT = True 283 JOIN_HINTS = False 284 TABLE_HINTS = False 285 286 TYPE_MAPPING = { 287 **generator.Generator.TYPE_MAPPING, # type: ignore 288 exp.DataType.Type.TEXT: "STRING", 289 exp.DataType.Type.DATETIME: "TIMESTAMP", 290 exp.DataType.Type.VARBINARY: "BINARY", 291 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 292 exp.DataType.Type.BIT: "BOOLEAN", 293 } 294 295 TRANSFORMS = { 296 **generator.Generator.TRANSFORMS, # type: ignore 297 exp.Group: 
transforms.preprocess([transforms.unalias_group]), 298 exp.Select: transforms.preprocess( 299 [ 300 transforms.eliminate_qualify, 301 transforms.eliminate_distinct_on, 302 transforms.unnest_to_explode, 303 ] 304 ), 305 exp.Property: _property_sql, 306 exp.ApproxDistinct: approx_count_distinct_sql, 307 exp.ArrayConcat: rename_func("CONCAT"), 308 exp.ArraySize: rename_func("SIZE"), 309 exp.ArraySort: _array_sort_sql, 310 exp.With: no_recursive_cte_sql, 311 exp.DateAdd: _add_date_sql, 312 exp.DateDiff: _date_diff_sql, 313 exp.DateStrToDate: rename_func("TO_DATE"), 314 exp.DateSub: _add_date_sql, 315 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", 316 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", 317 exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", 318 exp.If: if_sql, 319 exp.Index: _index_sql, 320 exp.ILike: no_ilike_sql, 321 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), 322 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), 323 exp.JSONFormat: _json_format_sql, 324 exp.Map: var_map_sql, 325 exp.Max: max_or_greatest, 326 exp.Min: min_or_least, 327 exp.VarMap: var_map_sql, 328 exp.Create: create_with_partitions_sql, 329 exp.Quantile: rename_func("PERCENTILE"), 330 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), 331 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), 332 exp.RegexpSplit: rename_func("SPLIT"), 333 exp.SafeDivide: no_safe_divide_sql, 334 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), 335 exp.SetAgg: rename_func("COLLECT_SET"), 336 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", 337 exp.StrPosition: strposition_to_locate_sql, 338 exp.StrToDate: _str_to_date_sql, 339 exp.StrToTime: _str_to_time_sql, 340 exp.StrToUnix: _str_to_unix_sql, 341 exp.StructExtract: 
struct_extract_sql, 342 exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}", 343 exp.TimeStrToDate: rename_func("TO_DATE"), 344 exp.TimeStrToTime: timestrtotime_sql, 345 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), 346 exp.TimeToStr: _time_to_str, 347 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), 348 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", 349 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", 350 exp.TsOrDsToDate: _to_date_sql, 351 exp.TryCast: no_trycast_sql, 352 exp.UnixToStr: lambda self, e: self.func( 353 "FROM_UNIXTIME", e.this, _time_format(self, e) 354 ), 355 exp.UnixToTime: rename_func("FROM_UNIXTIME"), 356 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), 357 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", 358 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", 359 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), 360 exp.NumberToStr: rename_func("FORMAT_NUMBER"), 361 exp.LastDateOfMonth: rename_func("LAST_DAY"), 362 exp.National: lambda self, e: self.sql(e, "this"), 363 } 364 365 PROPERTIES_LOCATION = { 366 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 367 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, 368 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 369 exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA, 370 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 371 } 372 373 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: 374 return self.func( 375 "COLLECT_LIST", 376 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, 377 ) 378 379 def with_properties(self, properties: exp.Properties) -> str: 380 return self.properties( 381 properties, 382 prefix=self.seg("TBLPROPERTIES"), 383 ) 384 385 def 
datatype_sql(self, expression: exp.DataType) -> str: 386 if ( 387 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) 388 and not expression.expressions 389 ): 390 expression = exp.DataType.build("text") 391 elif expression.this in exp.DataType.TEMPORAL_TYPES: 392 expression = exp.DataType.build(expression.this) 393 394 return super().datatype_sql(expression)
196 class Tokenizer(tokens.Tokenizer): 197 QUOTES = ["'", '"'] 198 IDENTIFIERS = ["`"] 199 STRING_ESCAPES = ["\\"] 200 ENCODE = "utf-8" 201 IDENTIFIER_CAN_START_WITH_DIGIT = True 202 203 KEYWORDS = { 204 **tokens.Tokenizer.KEYWORDS, 205 "ADD ARCHIVE": TokenType.COMMAND, 206 "ADD ARCHIVES": TokenType.COMMAND, 207 "ADD FILE": TokenType.COMMAND, 208 "ADD FILES": TokenType.COMMAND, 209 "ADD JAR": TokenType.COMMAND, 210 "ADD JARS": TokenType.COMMAND, 211 "MSCK REPAIR": TokenType.COMMAND, 212 "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES, 213 } 214 215 NUMERIC_LITERALS = { 216 "L": "BIGINT", 217 "S": "SMALLINT", 218 "Y": "TINYINT", 219 "D": "DOUBLE", 220 "F": "FLOAT", 221 "BD": "DECIMAL", 222 }
Inherited Members
224 class Parser(parser.Parser): 225 LOG_DEFAULTS_TO_LN = True 226 STRICT_CAST = False 227 228 FUNCTIONS = { 229 **parser.Parser.FUNCTIONS, # type: ignore 230 "APPROX_COUNT_DISTINCT": exp.ApproxDistinct.from_arg_list, 231 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, 232 "DATE_ADD": lambda args: exp.TsOrDsAdd( 233 this=seq_get(args, 0), 234 expression=seq_get(args, 1), 235 unit=exp.Literal.string("DAY"), 236 ), 237 "DATEDIFF": lambda args: exp.DateDiff( 238 this=exp.TsOrDsToDate(this=seq_get(args, 0)), 239 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), 240 ), 241 "DATE_SUB": lambda args: exp.TsOrDsAdd( 242 this=seq_get(args, 0), 243 expression=exp.Mul( 244 this=seq_get(args, 1), 245 expression=exp.Literal.number(-1), 246 ), 247 unit=exp.Literal.string("DAY"), 248 ), 249 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( 250 [ 251 exp.TimeStrToTime(this=seq_get(args, 0)), 252 seq_get(args, 1), 253 ] 254 ), 255 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), 256 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), 257 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, 258 "LOCATE": locate_to_strposition, 259 "MAP": parse_var_map, 260 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), 261 "PERCENTILE": exp.Quantile.from_arg_list, 262 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, 263 "COLLECT_SET": exp.SetAgg.from_arg_list, 264 "SIZE": exp.ArraySize.from_arg_list, 265 "SPLIT": exp.RegexpSplit.from_arg_list, 266 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), 267 "TO_JSON": exp.JSONFormat.from_arg_list, 268 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), 269 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), 270 } 271 272 PROPERTY_PARSERS = { 273 **parser.Parser.PROPERTY_PARSERS, # type: ignore 274 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( 275 expressions=self._parse_wrapped_csv(self._parse_property) 276 
), 277 }
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer
and produces
a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays eg ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: If the table alias comes after tablesample. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
Inherited Members
279 class Generator(generator.Generator): 280 LIMIT_FETCH = "LIMIT" 281 TABLESAMPLE_WITH_METHOD = False 282 TABLESAMPLE_SIZE_IS_PERCENT = True 283 JOIN_HINTS = False 284 TABLE_HINTS = False 285 286 TYPE_MAPPING = { 287 **generator.Generator.TYPE_MAPPING, # type: ignore 288 exp.DataType.Type.TEXT: "STRING", 289 exp.DataType.Type.DATETIME: "TIMESTAMP", 290 exp.DataType.Type.VARBINARY: "BINARY", 291 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 292 exp.DataType.Type.BIT: "BOOLEAN", 293 } 294 295 TRANSFORMS = { 296 **generator.Generator.TRANSFORMS, # type: ignore 297 exp.Group: transforms.preprocess([transforms.unalias_group]), 298 exp.Select: transforms.preprocess( 299 [ 300 transforms.eliminate_qualify, 301 transforms.eliminate_distinct_on, 302 transforms.unnest_to_explode, 303 ] 304 ), 305 exp.Property: _property_sql, 306 exp.ApproxDistinct: approx_count_distinct_sql, 307 exp.ArrayConcat: rename_func("CONCAT"), 308 exp.ArraySize: rename_func("SIZE"), 309 exp.ArraySort: _array_sort_sql, 310 exp.With: no_recursive_cte_sql, 311 exp.DateAdd: _add_date_sql, 312 exp.DateDiff: _date_diff_sql, 313 exp.DateStrToDate: rename_func("TO_DATE"), 314 exp.DateSub: _add_date_sql, 315 exp.DateToDi: lambda self, e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.dateint_format}) AS INT)", 316 exp.DiToDate: lambda self, e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.dateint_format})", 317 exp.FileFormatProperty: lambda self, e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", 318 exp.If: if_sql, 319 exp.Index: _index_sql, 320 exp.ILike: no_ilike_sql, 321 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), 322 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), 323 exp.JSONFormat: _json_format_sql, 324 exp.Map: var_map_sql, 325 exp.Max: max_or_greatest, 326 exp.Min: min_or_least, 327 exp.VarMap: var_map_sql, 328 exp.Create: create_with_partitions_sql, 329 exp.Quantile: rename_func("PERCENTILE"), 330 exp.ApproxQuantile: 
rename_func("PERCENTILE_APPROX"), 331 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), 332 exp.RegexpSplit: rename_func("SPLIT"), 333 exp.SafeDivide: no_safe_divide_sql, 334 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), 335 exp.SetAgg: rename_func("COLLECT_SET"), 336 exp.Split: lambda self, e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", 337 exp.StrPosition: strposition_to_locate_sql, 338 exp.StrToDate: _str_to_date_sql, 339 exp.StrToTime: _str_to_time_sql, 340 exp.StrToUnix: _str_to_unix_sql, 341 exp.StructExtract: struct_extract_sql, 342 exp.TableFormatProperty: lambda self, e: f"USING {self.sql(e, 'this')}", 343 exp.TimeStrToDate: rename_func("TO_DATE"), 344 exp.TimeStrToTime: timestrtotime_sql, 345 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), 346 exp.TimeToStr: _time_to_str, 347 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), 348 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", 349 exp.TsOrDsAdd: lambda self, e: f"DATE_ADD({self.sql(e, 'this')}, {self.sql(e, 'expression')})", 350 exp.TsOrDsToDate: _to_date_sql, 351 exp.TryCast: no_trycast_sql, 352 exp.UnixToStr: lambda self, e: self.func( 353 "FROM_UNIXTIME", e.this, _time_format(self, e) 354 ), 355 exp.UnixToTime: rename_func("FROM_UNIXTIME"), 356 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), 357 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", 358 exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}", 359 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), 360 exp.NumberToStr: rename_func("FORMAT_NUMBER"), 361 exp.LastDateOfMonth: rename_func("LAST_DAY"), 362 exp.National: lambda self, e: self.sql(e, "this"), 363 } 364 365 PROPERTIES_LOCATION = { 366 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 367 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, 
368 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 369 exp.TableFormatProperty: exp.Properties.Location.POST_SCHEMA, 370 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 371 } 372 373 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: 374 return self.func( 375 "COLLECT_LIST", 376 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, 377 ) 378 379 def with_properties(self, properties: exp.Properties) -> str: 380 return self.properties( 381 properties, 382 prefix=self.seg("TBLPROPERTIES"), 383 ) 384 385 def datatype_sql(self, expression: exp.DataType) -> str: 386 if ( 387 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) 388 and not expression.expressions 389 ): 390 expression = exp.DataType.build("text") 391 elif expression.this in exp.DataType.TEMPORAL_TYPES: 392 expression = exp.DataType.build(expression.this) 393 394 return super().datatype_sql(expression)
Generator interprets the given syntax tree and produces a SQL string as an output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the output the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- identify (bool | str): 'always': always quote, 'safe': quote identifiers if they don't contain an upcase, True defaults to always.
- normalize (bool): if set to True all identifiers will be lowercased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if true unnest table aliases are considered only as column aliases
- normalize_functions (str): normalize function names, "upper", "lower", or None Default: "upper"
- alias_post_tablesample (bool): if the table alias comes after tablesample Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, applying Hive-specific widening rules.

    An unparameterized VARCHAR/NVARCHAR is rebuilt as TEXT, and temporal
    types are rebuilt bare (dropping any parameters) before delegating
    to the base generator's rendering.
    """
    base_type = expression.this
    is_bare_varchar = (
        base_type in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
        and not expression.expressions
    )

    if is_bare_varchar:
        expression = exp.DataType.build("text")
    elif base_type in exp.DataType.TEMPORAL_TYPES:
        # Rebuild from the bare type so any parameters are discarded.
        expression = exp.DataType.build(base_type)

    return super().datatype_sql(expression)
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- afterjournalproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- select_sql
- schema_sql
- star_sql
- structkwarg_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql