sqlglot.dialects.snowflake
from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    date_trunc_to_time,
    datestrtodate_sql,
    format_time_lambda,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    rename_func,
    timestamptrunc_sql,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
    var_map_sql,
)
from sqlglot.expressions import Literal
from sqlglot.helper import flatten, seq_get
from sqlglot.parser import binary_range_parser
from sqlglot.tokens import TokenType


def _check_int(s: str) -> bool:
    if s[0] in ("-", "+"):
        return s[1:].isdigit()
    return s.isdigit()


# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _snowflake_to_timestamp(args: t.Sequence) -> t.Union[exp.StrToTime, exp.UnixToTime]:
    if len(args) == 2:
        first_arg, second_arg = args
        if second_arg.is_string:
            # case: <string_expr> [ , <format> ]
            return format_time_lambda(exp.StrToTime, "snowflake")(args)

        # case: <numeric_expr> [ , <scale> ]
        if second_arg.name not in ["0", "3", "9"]:
            raise ValueError(
                f"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9"
            )

        if second_arg.name == "0":
            timescale = exp.UnixToTime.SECONDS
        elif second_arg.name == "3":
            timescale = exp.UnixToTime.MILLIS
        elif second_arg.name == "9":
            timescale = exp.UnixToTime.MICROS

        return exp.UnixToTime(this=first_arg, scale=timescale)

    first_arg = seq_get(args, 0)
    if not isinstance(first_arg, Literal):
        # case: <variant_expr>
        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)

    if first_arg.is_string:
        if _check_int(first_arg.this):
            # case: <integer>
            return exp.UnixToTime.from_arg_list(args)

        # case: <date_expr>
        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)

    # case: <numeric_expr>
    return exp.UnixToTime.from_arg_list(args)


def _unix_to_time_sql(self: generator.Generator, expression: exp.UnixToTime) -> str:
    scale = expression.args.get("scale")
    timestamp = self.sql(expression, "this")
    if scale in [None, exp.UnixToTime.SECONDS]:
        return f"TO_TIMESTAMP({timestamp})"
    if scale == exp.UnixToTime.MILLIS:
        return f"TO_TIMESTAMP({timestamp}, 3)"
    if scale == exp.UnixToTime.MICROS:
        return f"TO_TIMESTAMP({timestamp}, 9)"

    raise ValueError("Improper scale for timestamp")


# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: parser.Parser) -> t.Optional[exp.Expression]:
    this = self._parse_var() or self._parse_type()

    if not this:
        return None

    self._match(TokenType.COMMA)
    expression = self._parse_bitwise()

    name = this.name.upper()
    if name.startswith("EPOCH"):
        if name.startswith("EPOCH_MILLISECOND"):
            scale = 10**3
        elif name.startswith("EPOCH_MICROSECOND"):
            scale = 10**6
        elif name.startswith("EPOCH_NANOSECOND"):
            scale = 10**9
        else:
            scale = None

        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

        return to_unix

    return self.expression(exp.Extract, this=this, expression=expression)


# https://docs.snowflake.com/en/sql-reference/functions/div0
def _div0_to_if(args: t.Sequence) -> exp.Expression:
    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
    true = exp.Literal.number(0)
    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
    return exp.If(this=cond, true=true, false=false)


# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _zeroifnull_to_if(args: t.Sequence) -> exp.Expression:
    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))


# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _nullifzero_to_if(args: t.Sequence) -> exp.Expression:
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))


def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
    if expression.this == exp.DataType.Type.ARRAY:
        return "ARRAY"
    elif expression.this == exp.DataType.Type.MAP:
        return "OBJECT"
    return self.datatype_sql(expression)


class Snowflake(Dialect):
    null_ordering = "nulls_are_large"
    time_format = "'yyyy-mm-dd hh24:mi:ss'"

    time_mapping = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }

    class Parser(parser.Parser):
        QUOTED_PIVOT_COLUMNS = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "CONVERT_TIMEZONE": lambda args: exp.AtTimeZone(
                this=seq_get(args, 1),
                zone=seq_get(args, 0),
            ),
            "DATE_TRUNC": date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DIV0": _div0_to_if,
            "IFF": exp.If.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": parser.parse_var_map,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TO_ARRAY": exp.Array.from_arg_list,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "TO_TIMESTAMP": _snowflake_to_timestamp,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
        }
        FUNCTION_PARSERS.pop("TRIM")

        FUNC_TOKENS = {
            *parser.Parser.FUNC_TOKENS,
            TokenType.RLIKE,
            TokenType.TABLE,
        }

        COLUMN_OPERATORS = {
            **parser.Parser.COLUMN_OPERATORS,  # type: ignore
            TokenType.COLON: lambda self, this, path: self.expression(
                exp.Bracket,
                this=this,
                expressions=[path],
            ),
        }

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,  # type: ignore
            TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,  # type: ignore
            "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True),
            "SET": lambda self: self._parse_alter_table_set_tag(),
        }

        def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression:
            self._match_text_seq("TAG")
            parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction)
            return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset)

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", "$$"]
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "PUT": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "MINUS": TokenType.EXCEPT,
            "SAMPLE": TokenType.TABLE_SAMPLE,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,  # type: ignore
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
            exp.DateDiff: lambda self, e: self.func(
                "DATEDIFF", e.text("unit"), e.expression, e.this
            ),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.If: rename_func("IFF"),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def values_sql(self, expression: exp.Values) -> str:
            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.

            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
            from adding quotes to the column by using the `identify` argument when generating the SQL.
            """
            alias = expression.args.get("alias")
            if alias and alias.args.get("columns"):
                expression = expression.transform(
                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
                    if isinstance(node, exp.Identifier)
                    and isinstance(node.parent, exp.TableAlias)
                    and node.arg_key == "columns"
                    else node,
                )
                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
            return super().values_sql(expression)

        def settag_sql(self, expression: exp.SetTag) -> str:
            action = "UNSET" if expression.args.get("unset") else "SET"
            return f"{action} TAG {self.expressions(expression)}"

        def select_sql(self, expression: exp.Select) -> str:
            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
            generating the SQL.

            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
            expression. This might not be true in a case where the same column name can be sourced from another table that can
            properly quote but should be true in most cases.
            """
            values_identifiers = set(
                flatten(
                    (v.args.get("alias") or exp.Alias()).args.get("columns", [])
                    for v in expression.find_all(exp.Values)
                )
            )
            if values_identifiers:
                expression = expression.transform(
                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
                    if isinstance(node, exp.Identifier) and node in values_identifiers
                    else node,
                )
                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
            return super().select_sql(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            return f"DESCRIBE{kind}{this}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"
class Snowflake(Dialect):
class Parser(parser.Parser):
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.RAISE.
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays, e.g. ARRAY[0] vs. ARRAY[1] as the head of a list. Default: 0.
- alias_post_tablesample: Whether the table alias comes after TABLESAMPLE. Default: False.
- max_errors: Maximum number of error messages to include in a raised ParseError. Only relevant if error_level is ErrorLevel.RAISE. Default: 3.
- null_ordering: The default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
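As a minimal sketch (with hypothetical table and column names), this parser maps Snowflake-specific constructs onto sqlglot's generic expression nodes, for example the ':' path operator and IFF:

import sqlglot
from sqlglot import exp

tree = sqlglot.parse_one("SELECT IFF(v:age > 30, 'senior', 'junior') FROM people", read="snowflake")
print(tree.find(exp.Bracket))  # the ':' column operator becomes an exp.Bracket access
print(tree.find(exp.If))       # IFF is parsed as the generic exp.If node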
class Tokenizer(tokens.Tokenizer):
class Generator(generator.Generator):
Generator interprets the given syntax tree and produces a SQL string as output.
Arguments:
- time_mapping (dict): The dictionary of custom time mappings in which the key represents a Python time format and the value the target time format.
- time_trie (trie): A trie of the time_mapping keys.
- pretty (bool): If set to True, the returned string will be formatted. Default: False.
- quote_start (str): The starting character used to delimit quotes. Default: '.
- quote_end (str): The ending character used to delimit quotes. Default: '.
- identifier_start (str): The starting character used to delimit identifiers. Default: ".
- identifier_end (str): The ending character used to delimit identifiers. Default: ".
- identify (bool | str): 'always' to always quote identifiers, 'safe' to quote identifiers only if they contain no uppercase characters; True is equivalent to 'always'.
- normalize (bool): If set to True, all identifiers will be lowercased.
- string_escape (str): The string escape character. Default: '.
- identifier_escape (str): The identifier escape character. Default: ".
- pad (int): The padding in a formatted string. Default: 2.
- indent (int): The size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): If True, UNNEST table aliases are treated only as column aliases.
- normalize_functions (str): How to normalize function names: "upper", "lower", or None. Default: "upper".
- alias_post_tablesample (bool): Whether the table alias comes after TABLESAMPLE. Default: False.
- unsupported_level (ErrorLevel): The generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- null_ordering (str): The default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large" and "nulls_are_last". Default: "nulls_are_small".
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. Only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
- leading_comma (bool): Whether the comma is leading or trailing in SELECT statements. Default: False.
- max_text_width (int): The maximum number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
- comments (bool): Whether or not to preserve comments in the output SQL code. Default: True.
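As an illustration of the overrides above, the sketch below generates Snowflake SQL from a tree parsed with the default dialect; per TYPE_MAPPING, TIMESTAMP should come out as TIMESTAMPNTZ (the exact output may vary between sqlglot versions):

import sqlglot

tree = sqlglot.parse_one("SELECT CAST(col AS TIMESTAMP) FROM t")
print(tree.sql(dialect="snowflake"))                # compact output
print(tree.sql(dialect="snowflake", pretty=True))   # formatted output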
def values_sql(self, expression: exp.Values) -> str:
Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted. Once we find columns that need to be unquoted, we also prevent the `identify` argument from re-adding quotes to them when generating the SQL.
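A minimal sketch of that behavior, with hypothetical names (the exact output depends on the sqlglot version):

import sqlglot

# Even with identify=True, the column aliases of the VALUES table alias are emitted unquoted.
sql = "SELECT c1 FROM (VALUES (1), (2)) AS t(c1)"
print(sqlglot.transpile(sql, read="snowflake", write="snowflake", identify=True)[0])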
def select_sql(self, expression: exp.Select) -> str:
Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted, and that all columns in a SELECT are unquoted as well. Once we find columns that need to be unquoted, we also prevent the `identify` argument from re-adding quotes to them when generating the SQL.
Note: We assume that any column referenced in a VALUES expression should be unquoted throughout the expression. This might not hold when the same column name is also sourced from another table that can be properly quoted, but it should be true in most cases.
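For example, a quoted reference to a VALUES alias column in the SELECT list is expected to be unquoted as well (hypothetical names; output may vary by sqlglot version):

import sqlglot

sql = 'SELECT "c1" FROM (VALUES (1), (2)) AS t("c1")'
# "c1" should be emitted without quotes in both the SELECT list and the alias.
print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])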
def generatedasidentitycolumnconstraint_sql(self, expression: exp.GeneratedAsIdentityColumnConstraint) -> str:
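The sketch below illustrates how identity columns are rendered with AUTOINCREMENT. It assumes the default parser accepts the standard GENERATED ... AS IDENTITY (START WITH ... INCREMENT BY ...) syntax; the exact output may vary between sqlglot versions.

import sqlglot

ddl = "CREATE TABLE t (id INT GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 2))"
# The identity constraint should be rendered as AUTOINCREMENT START 1 INCREMENT 2.
print(sqlglot.transpile(ddl, write="snowflake")[0])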
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- afterjournalproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- schema_sql
- star_sql
- structkwarg_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- unique_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql