sqlglot.dialects.bigquery
"""BigQuery dialect for sqlglot: tokenizer, parser and generator rules."""

from __future__ import annotations

import logging
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot._typing import E
from sqlglot.dialects.dialect import (
    Dialect,
    binary_from_function,
    datestrtodate_sql,
    format_time_lambda,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    parse_date_delta_with_interval,
    regexp_replace_sql,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType

logger = logging.getLogger("sqlglot")


def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    """Build a generator handler that renders ``<data_type>_<kind>(this, INTERVAL ...)``.

    Used for BigQuery's DATE_ADD / DATE_SUB / DATETIME_ADD / ... family, where the
    delta is expressed as an INTERVAL with an explicit unit.
    """

    def func(self: generator.Generator, expression: exp.Expression) -> str:
        this = self.sql(expression, "this")
        unit = expression.args.get("unit")
        # BigQuery requires an explicit unit; fall back to DAY when none was parsed.
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression, unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func


def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
    """Rewrite a VALUES derived table as UNNEST of an ARRAY of STRUCTs.

    BigQuery has no VALUES clause in FROM/JOIN position, so each tuple becomes a
    STRUCT whose fields carry the column aliases (or generated ``_c<i>`` names).
    VALUES outside FROM/JOIN is left to the default renderer.
    """
    if not expression.find_ancestor(exp.From, exp.Join):
        return self.values_sql(expression)

    alias = expression.args.get("alias")

    # NOTE(review): the comprehension variable `t` shadows the `typing` import
    # within this expression only.
    structs = [
        exp.Struct(
            expressions=[
                exp.alias_(value, column_name)
                for value, column_name in zip(
                    t.expressions,
                    alias.columns
                    if alias and alias.columns
                    else (f"_c{i}" for i in range(len(t.expressions))),
                )
            ]
        )
        for t in expression.find_all(exp.Tuple)
    ]

    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))


def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
    """Render RETURNS, using BigQuery's ``TABLE<col type, ...>`` angle-bracket form
    when the return value is a schema (table function), plain SQL otherwise."""
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
    """Render CREATE, promoting table-valued FUNCTIONs to ``CREATE TABLE FUNCTION``.

    For a table function, a Subquery/Literal body is unwrapped so the generator
    emits the bare query/string rather than a parenthesized/quoted wrapper.
    """
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)
    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        # Work on a copy so the input tree is not mutated.
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")
        if isinstance(
            expression.expression,
            (
                exp.Subquery,
                exp.Literal,
            ),
        ):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    from sqlglot.optimizer.scope import Scope

    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias:
                for column in Scope(expression).find_all(exp.Column):
                    if column.table == unnest.alias:
                        column.set("table", None)

    return expression


# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their select-list aliases when both
    GROUP BY and ORDER BY are present (see BigQuery issue linked above)."""
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            # Map each aliased projection expression to its alias identifier.
            aliases = {
                select.this: select.args["alias"]
                for select in expression.selects
                if isinstance(select, exp.Alias)
            }

            for e in group.expressions:
                alias = aliases.get(e)

                if alias:
                    e.replace(exp.column(alias))

    return expression


def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            # Star projections can't be matched to explicit column names here;
            # leave the CTE untouched and warn.
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression


def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Parse PARSE_TIMESTAMP(format, value[, zone]) into StrToTime.

    The format/value argument order is swapped to match StrToTime's convention,
    and the optional time zone is attached as ``zone``.
    """
    this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
    this.set("zone", seq_get(args, 2))
    return this


def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(y, m, d) parses to DateFromParts; any other arity parses to Date."""
    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
    return expr_type.from_arg_list(args)


def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.Hex(this=arg)


class BigQuery(Dialect):
    """Dialect definition for Google BigQuery (Standard SQL)."""

    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    # Mapping of BigQuery format elements to strftime-style codes.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            # Walk up through dotted references to find the real parent node.
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." byte literals and r'...' / R"..." raw literals for
        # every supported quote style.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        # DIV is handled as a function (see Parser.FUNCTIONS), not a keyword.
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # When the pattern has exactly one capturing group, record the
                # implicit group 1 explicitly; otherwise leave group unset.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(SELECT ...) takes a full statement as its single argument.
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, gluing dash-separated pieces back together.

            BigQuery allows dashes in unquoted table names, e.g. ``my-project.ds.t``.
            """
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, re-splitting dotted single identifiers.

            A quoted name like ``\\`project.dataset.table\\`` arrives as one
            identifier; it is split into catalog/db/this (extra parts beyond
            three are folded into a Dot chain).
            """
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Max: max_or_greatest,
            # TO_HEX(MD5(..)) round-trips the parse-time simplification in _parse_to_hex.
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Render AT TIME ZONE as TIMESTAMP(DATETIME(this, zone)), except inside
            CAST(.. AS STRING), which supports AT TIME ZONE natively."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """TRY_CAST renders as SAFE_CAST in BigQuery."""
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            # Column lists should have been pushed down by _pushdown_cte_column_names;
            # anything remaining (e.g. star queries) is unsupported.
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """Render ARRAY(<subquery>) with parentheses, inline arrays otherwise."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # ``x IN UNNEST(arr)`` needs no extra parentheses around the UNNEST call.
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            # BigQuery requires the DISTINCT qualifier on set operations.
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            # BigQuery requires the DISTINCT qualifier on set operations.
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Property lists render under an OPTIONS(...) clause in BigQuery."""
            return self.properties(properties, prefix=self.seg("OPTIONS"))
logger =
<Logger sqlglot (WARNING)>
186class BigQuery(Dialect): 187 UNNEST_COLUMN_ONLY = True 188 189 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity 190 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None 191 192 # bigquery udfs are case sensitive 193 NORMALIZE_FUNCTIONS = False 194 195 TIME_MAPPING = { 196 "%D": "%m/%d/%y", 197 } 198 199 FORMAT_MAPPING = { 200 "DD": "%d", 201 "MM": "%m", 202 "MON": "%b", 203 "MONTH": "%B", 204 "YYYY": "%Y", 205 "YY": "%y", 206 "HH": "%I", 207 "HH12": "%I", 208 "HH24": "%H", 209 "MI": "%M", 210 "SS": "%S", 211 "SSSSS": "%f", 212 "TZH": "%z", 213 } 214 215 # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement 216 # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table 217 PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"} 218 219 @classmethod 220 def normalize_identifier(cls, expression: E) -> E: 221 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). 222 # The following check is essentially a heuristic to detect tables based on whether or 223 # not they're qualified. 
224 if isinstance(expression, exp.Identifier): 225 parent = expression.parent 226 227 while isinstance(parent, exp.Dot): 228 parent = parent.parent 229 230 if ( 231 not isinstance(parent, exp.UserDefinedFunction) 232 and not (isinstance(parent, exp.Table) and parent.db) 233 and not expression.meta.get("is_table") 234 ): 235 expression.set("this", expression.this.lower()) 236 237 return expression 238 239 class Tokenizer(tokens.Tokenizer): 240 QUOTES = ["'", '"', '"""', "'''"] 241 COMMENTS = ["--", "#", ("/*", "*/")] 242 IDENTIFIERS = ["`"] 243 STRING_ESCAPES = ["\\"] 244 245 HEX_STRINGS = [("0x", ""), ("0X", "")] 246 247 BYTE_STRINGS = [ 248 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") 249 ] 250 251 RAW_STRINGS = [ 252 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") 253 ] 254 255 KEYWORDS = { 256 **tokens.Tokenizer.KEYWORDS, 257 "ANY TYPE": TokenType.VARIANT, 258 "BEGIN": TokenType.COMMAND, 259 "BEGIN TRANSACTION": TokenType.BEGIN, 260 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 261 "BYTES": TokenType.BINARY, 262 "DECLARE": TokenType.COMMAND, 263 "FLOAT64": TokenType.DOUBLE, 264 "INT64": TokenType.BIGINT, 265 "RECORD": TokenType.STRUCT, 266 "TIMESTAMP": TokenType.TIMESTAMPTZ, 267 "NOT DETERMINISTIC": TokenType.VOLATILE, 268 "UNKNOWN": TokenType.NULL, 269 } 270 KEYWORDS.pop("DIV") 271 272 class Parser(parser.Parser): 273 PREFIXED_PIVOT_COLUMNS = True 274 275 LOG_BASE_FIRST = False 276 LOG_DEFAULTS_TO_LN = True 277 278 FUNCTIONS = { 279 **parser.Parser.FUNCTIONS, 280 "DATE": _parse_date, 281 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 282 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 283 "DATE_TRUNC": lambda args: exp.DateTrunc( 284 unit=exp.Literal.string(str(seq_get(args, 1))), 285 this=seq_get(args, 0), 286 ), 287 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 288 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 289 "DIV": 
binary_from_function(exp.IntDiv), 290 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, 291 "MD5": exp.MD5Digest.from_arg_list, 292 "TO_HEX": _parse_to_hex, 293 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( 294 [seq_get(args, 1), seq_get(args, 0)] 295 ), 296 "PARSE_TIMESTAMP": _parse_timestamp, 297 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 298 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 299 this=seq_get(args, 0), 300 expression=seq_get(args, 1), 301 position=seq_get(args, 2), 302 occurrence=seq_get(args, 3), 303 group=exp.Literal.number(1) 304 if re.compile(str(seq_get(args, 1))).groups == 1 305 else None, 306 ), 307 "SPLIT": lambda args: exp.Split( 308 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split 309 this=seq_get(args, 0), 310 expression=seq_get(args, 1) or exp.Literal.string(","), 311 ), 312 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 313 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 314 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 315 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 316 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, 317 } 318 319 FUNCTION_PARSERS = { 320 **parser.Parser.FUNCTION_PARSERS, 321 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 322 } 323 FUNCTION_PARSERS.pop("TRIM") 324 325 NO_PAREN_FUNCTIONS = { 326 **parser.Parser.NO_PAREN_FUNCTIONS, 327 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 328 } 329 330 NESTED_TYPE_TOKENS = { 331 *parser.Parser.NESTED_TYPE_TOKENS, 332 TokenType.TABLE, 333 } 334 335 ID_VAR_TOKENS = { 336 *parser.Parser.ID_VAR_TOKENS, 337 TokenType.VALUES, 338 } 339 340 PROPERTY_PARSERS = { 341 **parser.Parser.PROPERTY_PARSERS, 342 "NOT DETERMINISTIC": lambda self: self.expression( 343 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 344 ), 345 "OPTIONS": lambda self: self._parse_with_property(), 346 } 347 348 
CONSTRAINT_PARSERS = { 349 **parser.Parser.CONSTRAINT_PARSERS, 350 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), 351 } 352 353 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: 354 this = super()._parse_table_part(schema=schema) 355 356 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names 357 if isinstance(this, exp.Identifier): 358 table_name = this.name 359 while self._match(TokenType.DASH, advance=False) and self._next: 360 self._advance(2) 361 table_name += f"-{self._prev.text}" 362 363 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) 364 365 return this 366 367 def _parse_table_parts(self, schema: bool = False) -> exp.Table: 368 table = super()._parse_table_parts(schema=schema) 369 if isinstance(table.this, exp.Identifier) and "." in table.name: 370 catalog, db, this, *rest = ( 371 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) 372 for x in split_num_words(table.name, ".", 3) 373 ) 374 375 if rest and this: 376 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) 377 378 table = exp.Table(this=this, db=db, catalog=catalog) 379 380 return table 381 382 class Generator(generator.Generator): 383 EXPLICIT_UNION = True 384 INTERVAL_ALLOWS_PLURAL_FORM = False 385 JOIN_HINTS = False 386 QUERY_HINTS = False 387 TABLE_HINTS = False 388 LIMIT_FETCH = "LIMIT" 389 RENAME_TABLE_WITH_DB = False 390 ESCAPE_LINE_BREAK = True 391 392 TRANSFORMS = { 393 **generator.Generator.TRANSFORMS, 394 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 395 exp.ArraySize: rename_func("ARRAY_LENGTH"), 396 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), 397 exp.Create: _create_sql, 398 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), 399 exp.DateAdd: _date_add_sql("DATE", "ADD"), 400 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, 
{self.sql(e.args.get('unit', 'DAY'))})", 401 exp.DateFromParts: rename_func("DATE"), 402 exp.DateStrToDate: datestrtodate_sql, 403 exp.DateSub: _date_add_sql("DATE", "SUB"), 404 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 405 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 406 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 407 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), 408 exp.GroupConcat: rename_func("STRING_AGG"), 409 exp.Hex: rename_func("TO_HEX"), 410 exp.ILike: no_ilike_sql, 411 exp.IntDiv: rename_func("DIV"), 412 exp.JSONFormat: rename_func("TO_JSON_STRING"), 413 exp.Max: max_or_greatest, 414 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), 415 exp.MD5Digest: rename_func("MD5"), 416 exp.Min: min_or_least, 417 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 418 exp.RegexpExtract: lambda self, e: self.func( 419 "REGEXP_EXTRACT", 420 e.this, 421 e.expression, 422 e.args.get("position"), 423 e.args.get("occurrence"), 424 ), 425 exp.RegexpReplace: regexp_replace_sql, 426 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 427 exp.ReturnsProperty: _returnsproperty_sql, 428 exp.Select: transforms.preprocess( 429 [ 430 transforms.explode_to_unnest, 431 _unqualify_unnest, 432 transforms.eliminate_distinct_on, 433 _alias_ordered_group, 434 ] 435 ), 436 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 437 if e.name == "IMMUTABLE" 438 else "NOT DETERMINISTIC", 439 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", 440 exp.StrToTime: lambda self, e: self.func( 441 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") 442 ), 443 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 444 exp.TimeSub: _date_add_sql("TIME", "SUB"), 445 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 446 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 447 exp.TimeStrToTime: timestrtotime_sql, 448 exp.Trim: lambda self, e: 
self.func(f"TRIM", e.this, e.expression), 449 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 450 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 451 exp.Unhex: rename_func("FROM_HEX"), 452 exp.Values: _derived_table_values_to_unnest, 453 exp.VariancePop: rename_func("VAR_POP"), 454 } 455 456 TYPE_MAPPING = { 457 **generator.Generator.TYPE_MAPPING, 458 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", 459 exp.DataType.Type.BIGINT: "INT64", 460 exp.DataType.Type.BINARY: "BYTES", 461 exp.DataType.Type.BOOLEAN: "BOOL", 462 exp.DataType.Type.CHAR: "STRING", 463 exp.DataType.Type.DECIMAL: "NUMERIC", 464 exp.DataType.Type.DOUBLE: "FLOAT64", 465 exp.DataType.Type.FLOAT: "FLOAT64", 466 exp.DataType.Type.INT: "INT64", 467 exp.DataType.Type.NCHAR: "STRING", 468 exp.DataType.Type.NVARCHAR: "STRING", 469 exp.DataType.Type.SMALLINT: "INT64", 470 exp.DataType.Type.TEXT: "STRING", 471 exp.DataType.Type.TIMESTAMP: "DATETIME", 472 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 473 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", 474 exp.DataType.Type.TINYINT: "INT64", 475 exp.DataType.Type.VARBINARY: "BYTES", 476 exp.DataType.Type.VARCHAR: "STRING", 477 exp.DataType.Type.VARIANT: "ANY TYPE", 478 } 479 480 PROPERTIES_LOCATION = { 481 **generator.Generator.PROPERTIES_LOCATION, 482 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 483 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 484 } 485 486 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords 487 RESERVED_KEYWORDS = { 488 *generator.Generator.RESERVED_KEYWORDS, 489 "all", 490 "and", 491 "any", 492 "array", 493 "as", 494 "asc", 495 "assert_rows_modified", 496 "at", 497 "between", 498 "by", 499 "case", 500 "cast", 501 "collate", 502 "contains", 503 "create", 504 "cross", 505 "cube", 506 "current", 507 "default", 508 "define", 509 "desc", 510 "distinct", 511 "else", 512 "end", 513 "enum", 514 "escape", 515 "except", 516 "exclude", 517 "exists", 518 "extract", 519 "false", 520 
"fetch", 521 "following", 522 "for", 523 "from", 524 "full", 525 "group", 526 "grouping", 527 "groups", 528 "hash", 529 "having", 530 "if", 531 "ignore", 532 "in", 533 "inner", 534 "intersect", 535 "interval", 536 "into", 537 "is", 538 "join", 539 "lateral", 540 "left", 541 "like", 542 "limit", 543 "lookup", 544 "merge", 545 "natural", 546 "new", 547 "no", 548 "not", 549 "null", 550 "nulls", 551 "of", 552 "on", 553 "or", 554 "order", 555 "outer", 556 "over", 557 "partition", 558 "preceding", 559 "proto", 560 "qualify", 561 "range", 562 "recursive", 563 "respect", 564 "right", 565 "rollup", 566 "rows", 567 "select", 568 "set", 569 "some", 570 "struct", 571 "tablesample", 572 "then", 573 "to", 574 "treat", 575 "true", 576 "unbounded", 577 "union", 578 "unnest", 579 "using", 580 "when", 581 "where", 582 "window", 583 "with", 584 "within", 585 } 586 587 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: 588 parent = expression.parent 589 590 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). 591 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
592 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): 593 return self.func( 594 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) 595 ) 596 597 return super().attimezone_sql(expression) 598 599 def trycast_sql(self, expression: exp.TryCast) -> str: 600 return self.cast_sql(expression, safe_prefix="SAFE_") 601 602 def cte_sql(self, expression: exp.CTE) -> str: 603 if expression.alias_column_names: 604 self.unsupported("Column names in CTE definition are not supported.") 605 return super().cte_sql(expression) 606 607 def array_sql(self, expression: exp.Array) -> str: 608 first_arg = seq_get(expression.expressions, 0) 609 if isinstance(first_arg, exp.Subqueryable): 610 return f"ARRAY{self.wrap(self.sql(first_arg))}" 611 612 return inline_array_sql(self, expression) 613 614 def transaction_sql(self, *_) -> str: 615 return "BEGIN TRANSACTION" 616 617 def commit_sql(self, *_) -> str: 618 return "COMMIT TRANSACTION" 619 620 def rollback_sql(self, *_) -> str: 621 return "ROLLBACK TRANSACTION" 622 623 def in_unnest_op(self, expression: exp.Unnest) -> str: 624 return self.sql(expression) 625 626 def except_op(self, expression: exp.Except) -> str: 627 if not expression.args.get("distinct", False): 628 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 629 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 630 631 def intersect_op(self, expression: exp.Intersect) -> str: 632 if not expression.args.get("distinct", False): 633 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 634 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 635 636 def with_properties(self, properties: exp.Properties) -> str: 637 return self.properties(properties, prefix=self.seg("OPTIONS"))
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
@classmethod
def
normalize_identifier(cls, expression: ~E) -> ~E:
219 @classmethod 220 def normalize_identifier(cls, expression: E) -> E: 221 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). 222 # The following check is essentially a heuristic to detect tables based on whether or 223 # not they're qualified. 224 if isinstance(expression, exp.Identifier): 225 parent = expression.parent 226 227 while isinstance(parent, exp.Dot): 228 parent = parent.parent 229 230 if ( 231 not isinstance(parent, exp.UserDefinedFunction) 232 and not (isinstance(parent, exp.Table) and parent.db) 233 and not expression.meta.get("is_table") 234 ): 235 expression.set("this", expression.this.lower()) 236 237 return expression
Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of whether they are quoted.
tokenizer_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
parser_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Parser'>
generator_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.dialects.dialect.Dialect
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- DATE_FORMAT
- DATEINT_FORMAT
- TIME_FORMAT
- get_or_raise
- format_time
- case_sensitive
- can_identify
- quote_identifier
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
239 class Tokenizer(tokens.Tokenizer): 240 QUOTES = ["'", '"', '"""', "'''"] 241 COMMENTS = ["--", "#", ("/*", "*/")] 242 IDENTIFIERS = ["`"] 243 STRING_ESCAPES = ["\\"] 244 245 HEX_STRINGS = [("0x", ""), ("0X", "")] 246 247 BYTE_STRINGS = [ 248 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") 249 ] 250 251 RAW_STRINGS = [ 252 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") 253 ] 254 255 KEYWORDS = { 256 **tokens.Tokenizer.KEYWORDS, 257 "ANY TYPE": TokenType.VARIANT, 258 "BEGIN": TokenType.COMMAND, 259 "BEGIN TRANSACTION": TokenType.BEGIN, 260 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 261 "BYTES": TokenType.BINARY, 262 "DECLARE": TokenType.COMMAND, 263 "FLOAT64": TokenType.DOUBLE, 264 "INT64": TokenType.BIGINT, 265 "RECORD": TokenType.STRUCT, 266 "TIMESTAMP": TokenType.TIMESTAMPTZ, 267 "NOT DETERMINISTIC": TokenType.VOLATILE, 268 "UNKNOWN": TokenType.NULL, 269 } 270 KEYWORDS.pop("DIV")
BYTE_STRINGS =
[("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS =
[("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 
'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'IF': <TokenType.IF: 'IF'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 
'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NEXT VALUE FOR': <TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 
'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': 
<TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': 
<TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>}
272 class Parser(parser.Parser): 273 PREFIXED_PIVOT_COLUMNS = True 274 275 LOG_BASE_FIRST = False 276 LOG_DEFAULTS_TO_LN = True 277 278 FUNCTIONS = { 279 **parser.Parser.FUNCTIONS, 280 "DATE": _parse_date, 281 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 282 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 283 "DATE_TRUNC": lambda args: exp.DateTrunc( 284 unit=exp.Literal.string(str(seq_get(args, 1))), 285 this=seq_get(args, 0), 286 ), 287 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 288 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 289 "DIV": binary_from_function(exp.IntDiv), 290 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, 291 "MD5": exp.MD5Digest.from_arg_list, 292 "TO_HEX": _parse_to_hex, 293 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( 294 [seq_get(args, 1), seq_get(args, 0)] 295 ), 296 "PARSE_TIMESTAMP": _parse_timestamp, 297 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 298 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 299 this=seq_get(args, 0), 300 expression=seq_get(args, 1), 301 position=seq_get(args, 2), 302 occurrence=seq_get(args, 3), 303 group=exp.Literal.number(1) 304 if re.compile(str(seq_get(args, 1))).groups == 1 305 else None, 306 ), 307 "SPLIT": lambda args: exp.Split( 308 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split 309 this=seq_get(args, 0), 310 expression=seq_get(args, 1) or exp.Literal.string(","), 311 ), 312 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 313 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 314 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 315 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 316 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, 317 } 318 319 FUNCTION_PARSERS = { 320 **parser.Parser.FUNCTION_PARSERS, 321 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 322 } 323 
FUNCTION_PARSERS.pop("TRIM") 324 325 NO_PAREN_FUNCTIONS = { 326 **parser.Parser.NO_PAREN_FUNCTIONS, 327 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 328 } 329 330 NESTED_TYPE_TOKENS = { 331 *parser.Parser.NESTED_TYPE_TOKENS, 332 TokenType.TABLE, 333 } 334 335 ID_VAR_TOKENS = { 336 *parser.Parser.ID_VAR_TOKENS, 337 TokenType.VALUES, 338 } 339 340 PROPERTY_PARSERS = { 341 **parser.Parser.PROPERTY_PARSERS, 342 "NOT DETERMINISTIC": lambda self: self.expression( 343 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 344 ), 345 "OPTIONS": lambda self: self._parse_with_property(), 346 } 347 348 CONSTRAINT_PARSERS = { 349 **parser.Parser.CONSTRAINT_PARSERS, 350 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), 351 } 352 353 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: 354 this = super()._parse_table_part(schema=schema) 355 356 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names 357 if isinstance(this, exp.Identifier): 358 table_name = this.name 359 while self._match(TokenType.DASH, advance=False) and self._next: 360 self._advance(2) 361 table_name += f"-{self._prev.text}" 362 363 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) 364 365 return this 366 367 def _parse_table_parts(self, schema: bool = False) -> exp.Table: 368 table = super()._parse_table_parts(schema=schema) 369 if isinstance(table.this, exp.Identifier) and "." in table.name: 370 catalog, db, this, *rest = ( 371 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) 372 for x in split_num_words(table.name, ".", 3) 373 ) 374 375 if rest and this: 376 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) 377 378 table = exp.Table(this=this, db=db, catalog=catalog) 379 380 return table
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
FUNCTIONS =
{'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 
'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 
'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 
'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS =
{'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS =
{<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS =
{<TokenType.ARRAY: 'ARRAY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.TABLE: 'TABLE'>}
ID_VAR_TOKENS =
{<TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TRUE: 'TRUE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.DELETE: 'DELETE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.FULL: 'FULL'>, <TokenType.INT256: 'INT256'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.INT128: 'INT128'>, <TokenType.END: 'END'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ASC: 'ASC'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.LOAD: 'LOAD'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UINT128: 'UINT128'>, <TokenType.JSON: 'JSON'>, <TokenType.INET: 'INET'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 
<TokenType.NCHAR: 'NCHAR'>, <TokenType.VALUES: 'VALUES'>, <TokenType.IF: 'IF'>, <TokenType.TABLE: 'TABLE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIME: 'TIME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ROW: 'ROW'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANY: 'ANY'>, <TokenType.DATE: 'DATE'>, <TokenType.SET: 'SET'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.BIT: 'BIT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.VAR: 'VAR'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UINT: 'UINT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DIV: 'DIV'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CASE: 'CASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.ALL: 'ALL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.MAP: 'MAP'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DESC: 'DESC'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UUID: 'UUID'>, <TokenType.KEEP: 'KEEP'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 
<TokenType.VOLATILE: 'VOLATILE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.INT: 'INT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.INTERVAL: 'INTERVAL'>}
PROPERTY_PARSERS =
{'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function 
Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS =
{'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
SET_TRIE: Dict =
{'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.parser.Parser
- Parser
- ENUM_TYPE_TOKENS
- TYPE_TOKENS
- SUBQUERY_PREDICATES
- RESERVED_KEYWORDS
- DB_CREATABLES
- CREATABLES
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- RANGE_PARSERS
- ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- NO_PAREN_FUNCTION_PARSERS
- FUNCTIONS_WITH_ALIASED_ARGS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- MODIFIABLES
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- INSERT_ALTERNATIVES
- CLONE_KINDS
- TABLE_INDEX_HINT_TOKENS
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- ADD_CONSTRAINT_TOKENS
- STRICT_CAST
- CONCAT_NULL_OUTPUTS_STRING
- IDENTIFY_PIVOT_STRINGS
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- STRICT_STRING_CONCAT
- NULL_ORDERING
- error_level
- error_message_context
- max_errors
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Generator(generator.Generator):
    """Generates BigQuery (GoogleSQL) from a sqlglot syntax tree.

    Overrides the base :class:`generator.Generator` with BigQuery-specific
    settings, expression transforms, type mappings, property locations,
    reserved keywords, and SQL-producing methods.
    """

    # BigQuery requires explicit UNION DISTINCT / UNION ALL.
    EXPLICIT_UNION = True
    # BigQuery intervals use singular units (e.g. INTERVAL 2 DAY, not DAYS).
    INTERVAL_ALLOWS_PLURAL_FORM = False
    JOIN_HINTS = False
    QUERY_HINTS = False
    TABLE_HINTS = False
    LIMIT_FETCH = "LIMIT"
    RENAME_TABLE_WITH_DB = False
    ESCAPE_LINE_BREAK = True

    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArraySize: rename_func("ARRAY_LENGTH"),
        exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
        exp.Create: _create_sql,
        exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
        exp.DateAdd: _date_add_sql("DATE", "ADD"),
        exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
        exp.DateFromParts: rename_func("DATE"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DateSub: _date_add_sql("DATE", "SUB"),
        exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
        exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
        exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
        exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
        exp.GroupConcat: rename_func("STRING_AGG"),
        exp.Hex: rename_func("TO_HEX"),
        exp.ILike: no_ilike_sql,
        exp.IntDiv: rename_func("DIV"),
        exp.JSONFormat: rename_func("TO_JSON_STRING"),
        exp.Max: max_or_greatest,
        # BigQuery's MD5 returns BYTES; wrap in TO_HEX to get the usual hex string.
        exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
        exp.MD5Digest: rename_func("MD5"),
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.RegexpExtract: lambda self, e: self.func(
            "REGEXP_EXTRACT",
            e.this,
            e.expression,
            e.args.get("position"),
            e.args.get("occurrence"),
        ),
        exp.RegexpReplace: regexp_replace_sql,
        exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        exp.ReturnsProperty: _returnsproperty_sql,
        exp.Select: transforms.preprocess(
            [
                transforms.explode_to_unnest,
                _unqualify_unnest,
                transforms.eliminate_distinct_on,
                _alias_ordered_group,
            ]
        ),
        # Fixed: these two lambdas used f-string prefixes with no placeholders (F541).
        exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
        if e.name == "IMMUTABLE"
        else "NOT DETERMINISTIC",
        exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
        exp.StrToTime: lambda self, e: self.func(
            "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
        ),
        exp.TimeAdd: _date_add_sql("TIME", "ADD"),
        exp.TimeSub: _date_add_sql("TIME", "SUB"),
        exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
        exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
        exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
        exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
        exp.Unhex: rename_func("FROM_HEX"),
        exp.Values: _derived_table_values_to_unnest,
        exp.VariancePop: rename_func("VAR_POP"),
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
        exp.DataType.Type.BIGINT: "INT64",
        exp.DataType.Type.BINARY: "BYTES",
        exp.DataType.Type.BOOLEAN: "BOOL",
        exp.DataType.Type.CHAR: "STRING",
        exp.DataType.Type.DECIMAL: "NUMERIC",
        exp.DataType.Type.DOUBLE: "FLOAT64",
        exp.DataType.Type.FLOAT: "FLOAT64",
        exp.DataType.Type.INT: "INT64",
        exp.DataType.Type.NCHAR: "STRING",
        exp.DataType.Type.NVARCHAR: "STRING",
        exp.DataType.Type.SMALLINT: "INT64",
        exp.DataType.Type.TEXT: "STRING",
        exp.DataType.Type.TIMESTAMP: "DATETIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
        exp.DataType.Type.TINYINT: "INT64",
        exp.DataType.Type.VARBINARY: "BYTES",
        exp.DataType.Type.VARCHAR: "STRING",
        exp.DataType.Type.VARIANT: "ANY TYPE",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    RESERVED_KEYWORDS = {
        *generator.Generator.RESERVED_KEYWORDS,
        "all",
        "and",
        "any",
        "array",
        "as",
        "asc",
        "assert_rows_modified",
        "at",
        "between",
        "by",
        "case",
        "cast",
        "collate",
        "contains",
        "create",
        "cross",
        "cube",
        "current",
        "default",
        "define",
        "desc",
        "distinct",
        "else",
        "end",
        "enum",
        "escape",
        "except",
        "exclude",
        "exists",
        "extract",
        "false",
        "fetch",
        "following",
        "for",
        "from",
        "full",
        "group",
        "grouping",
        "groups",
        "hash",
        "having",
        "if",
        "ignore",
        "in",
        "inner",
        "intersect",
        "interval",
        "into",
        "is",
        "join",
        "lateral",
        "left",
        "like",
        "limit",
        "lookup",
        "merge",
        "natural",
        "new",
        "no",
        "not",
        "null",
        "nulls",
        "of",
        "on",
        "or",
        "order",
        "outer",
        "over",
        "partition",
        "preceding",
        "proto",
        "qualify",
        "range",
        "recursive",
        "respect",
        "right",
        "rollup",
        "rows",
        "select",
        "set",
        "some",
        "struct",
        "tablesample",
        "then",
        "to",
        "treat",
        "true",
        "unbounded",
        "union",
        "unnest",
        "using",
        "when",
        "where",
        "window",
        "with",
        "within",
    }

    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        """Render AT TIME ZONE, converting to TIMESTAMP(DATETIME(..)) outside text CASTs."""
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        """TRY_CAST is spelled SAFE_CAST in BigQuery."""
        return self.cast_sql(expression, safe_prefix="SAFE_")

    def cte_sql(self, expression: exp.CTE) -> str:
        """Render a CTE, warning that BigQuery CTEs can't declare column names."""
        if expression.alias_column_names:
            self.unsupported("Column names in CTE definition are not supported.")
        return super().cte_sql(expression)

    def array_sql(self, expression: exp.Array) -> str:
        """Render ARRAY(...): subquery form for subqueryables, inline literal otherwise."""
        first_arg = seq_get(expression.expressions, 0)
        if isinstance(first_arg, exp.Subqueryable):
            return f"ARRAY{self.wrap(self.sql(first_arg))}"

        return inline_array_sql(self, expression)

    def transaction_sql(self, *_) -> str:
        return "BEGIN TRANSACTION"

    def commit_sql(self, *_) -> str:
        return "COMMIT TRANSACTION"

    def rollback_sql(self, *_) -> str:
        return "ROLLBACK TRANSACTION"

    def in_unnest_op(self, expression: exp.Unnest) -> str:
        # BigQuery doesn't parenthesize UNNEST on the right side of IN.
        return self.sql(expression)

    def except_op(self, expression: exp.Except) -> str:
        """EXCEPT must be DISTINCT in BigQuery; warn on the ALL form."""
        if not expression.args.get("distinct", False):
            self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
        return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def intersect_op(self, expression: exp.Intersect) -> str:
        """INTERSECT must be DISTINCT in BigQuery; warn on the ALL form."""
        if not expression.args.get("distinct", False):
            self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
        return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def with_properties(self, properties: exp.Properties) -> str:
        """BigQuery table properties are emitted in an OPTIONS(...) clause."""
        return self.properties(properties, prefix=self.seg("OPTIONS"))
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
TRANSFORMS =
{<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function 
BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING =
{<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION =
{<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 
'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 
'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS =
{'rows', 'limit', 'to', 'exclude', 'window', 'case', 'interval', 'into', 'for', 'new', 'select', 'desc', 'qualify', 'having', 'default', 'distinct', 'on', 'where', 'unnest', 'assert_rows_modified', 'collate', 'except', 'groups', 'cast', 'intersect', 'left', 'null', 'outer', 'merge', 'hash', 'escape', 'at', 'create', 'if', 'struct', 'current', 'using', 'no', 'not', 'with', 'group', 'set', 'unbounded', 'over', 'natural', 'and', 'partition', 'cross', 'enum', 'right', 'union', 'order', 'some', 'full', 'cube', 'in', 'inner', 'from', 'within', 'like', 'any', 'lookup', 'recursive', 'proto', 'tablesample', 'fetch', 'range', 'following', 'join', 'all', 'else', 'asc', 'treat', 'by', 'is', 'rollup', 'of', 'as', 'when', 'ignore', 'preceding', 'nulls', 'between', 'respect', 'define', 'or', 'true', 'then', 'exists', 'extract', 'false', 'grouping', 'lateral', 'array', 'end', 'contains'}
    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)
@classmethod
def
can_identify(text: str, identify: str | bool = 'safe') -> bool:
    @classmethod
    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
        """Checks if text can be identified given an identify option.

        Args:
            text: The text to check.
            identify:
                "always" or `True`: Always returns true.
                "safe": True if the identifier is case-insensitive.

        Returns:
            Whether or not the given text can be identified.
        """
        if identify is True or identify == "always":
            return True

        if identify == "safe":
            return not cls.case_sensitive(text)

        return False
Checks if text can be identified given an identify option.
Arguments:
- text: The text to check.
- identify: "always" or `True`: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:
Whether or not the given text can be identified.
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- LOCKING_READS_SUPPORTED
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SIZE_IS_PERCENT
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- SELECT_KINDS
- VALUES_AS_TABLE
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- WITH_SEPARATED_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- SENTINEL_LINE_BREAK
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- normalize_functions
- unsupported_messages
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypesize_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- offset_limit_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- safebracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- safeconcat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- add_sql
- and_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- safedpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- indexcolumnconstraint_sql