From 49af28576db02470fe1d2de04e3901309b60c2e4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Mon, 10 Jul 2023 07:36:29 +0200 Subject: Merging upstream version 17.3.0. Signed-off-by: Daniel Baumann --- docs/sqlglot/dialects/bigquery.html | 2647 ++++++++++++++++++----------------- 1 file changed, 1329 insertions(+), 1318 deletions(-) (limited to 'docs/sqlglot/dialects/bigquery.html') diff --git a/docs/sqlglot/dialects/bigquery.html b/docs/sqlglot/dialects/bigquery.html index 778f506..3a45e5a 100644 --- a/docs/sqlglot/dialects/bigquery.html +++ b/docs/sqlglot/dialects/bigquery.html @@ -515,445 +515,452 @@ 169 return this 170 171 -172class BigQuery(Dialect): -173 UNNEST_COLUMN_ONLY = True -174 -175 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity -176 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None -177 -178 # bigquery udfs are case sensitive -179 NORMALIZE_FUNCTIONS = False -180 -181 TIME_MAPPING = { -182 "%D": "%m/%d/%y", -183 } -184 -185 FORMAT_MAPPING = { -186 "DD": "%d", -187 "MM": "%m", -188 "MON": "%b", -189 "MONTH": "%B", -190 "YYYY": "%Y", -191 "YY": "%y", -192 "HH": "%I", -193 "HH12": "%I", -194 "HH24": "%H", -195 "MI": "%M", -196 "SS": "%S", -197 "SSSSS": "%f", -198 "TZH": "%z", -199 } -200 -201 @classmethod -202 def normalize_identifier(cls, expression: E) -> E: -203 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). -204 # The following check is essentially a heuristic to detect tables based on whether or -205 # not they're qualified. -206 if isinstance(expression, exp.Identifier): -207 parent = expression.parent -208 -209 while isinstance(parent, exp.Dot): -210 parent = parent.parent -211 -212 if ( -213 not isinstance(parent, exp.UserDefinedFunction) -214 and not (isinstance(parent, exp.Table) and parent.db) -215 and not expression.meta.get("is_table") -216 ): -217 expression.set("this", expression.this.lower()) -218 -219 return expression -220 -221 class Tokenizer(tokens.Tokenizer): -222 QUOTES = ["'", '"', '"""', "'''"] -223 COMMENTS = ["--", "#", ("/*", "*/")] -224 IDENTIFIERS = ["`"] -225 STRING_ESCAPES = ["\\"] -226 -227 HEX_STRINGS = [("0x", ""), ("0X", "")] -228 -229 BYTE_STRINGS = [ -230 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") -231 ] -232 -233 RAW_STRINGS = [ -234 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") -235 ] -236 -237 KEYWORDS = { -238 **tokens.Tokenizer.KEYWORDS, -239 "ANY TYPE": TokenType.VARIANT, -240 "BEGIN": TokenType.COMMAND, -241 "BEGIN TRANSACTION": TokenType.BEGIN, -242 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, -243 "BYTES": TokenType.BINARY, -244 "DECLARE": TokenType.COMMAND, -245 "FLOAT64": TokenType.DOUBLE, -246 "INT64": TokenType.BIGINT, -247 "RECORD": TokenType.STRUCT, -248 "TIMESTAMP": TokenType.TIMESTAMPTZ, -249 "NOT DETERMINISTIC": TokenType.VOLATILE, -250 "UNKNOWN": TokenType.NULL, -251 } -252 KEYWORDS.pop("DIV") -253 -254 class Parser(parser.Parser): -255 PREFIXED_PIVOT_COLUMNS = True -256 -257 LOG_BASE_FIRST = False -258 LOG_DEFAULTS_TO_LN = True -259 -260 FUNCTIONS = { -261 **parser.Parser.FUNCTIONS, -262 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), -263 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), -264 "DATE_TRUNC": lambda args: exp.DateTrunc( -265 unit=exp.Literal.string(str(seq_get(args, 1))), -266 this=seq_get(args, 0), -267 ), -268 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), -269 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 
-270 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), -271 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, -272 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( -273 [seq_get(args, 1), seq_get(args, 0)] -274 ), -275 "PARSE_TIMESTAMP": _parse_timestamp, -276 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, -277 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -278 this=seq_get(args, 0), -279 expression=seq_get(args, 1), -280 position=seq_get(args, 2), -281 occurrence=seq_get(args, 3), -282 group=exp.Literal.number(1) -283 if re.compile(str(seq_get(args, 1))).groups == 1 -284 else None, -285 ), -286 "SPLIT": lambda args: exp.Split( -287 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split -288 this=seq_get(args, 0), -289 expression=seq_get(args, 1) or exp.Literal.string(","), -290 ), -291 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), -292 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), -293 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), -294 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), -295 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, -296 } -297 -298 FUNCTION_PARSERS = { -299 **parser.Parser.FUNCTION_PARSERS, -300 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), -301 } -302 FUNCTION_PARSERS.pop("TRIM") +172def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts: +173 expr_type = exp.DateFromParts if len(args) == 3 else exp.Date +174 return expr_type.from_arg_list(args) +175 +176 +177class BigQuery(Dialect): +178 UNNEST_COLUMN_ONLY = True +179 +180 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity +181 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None +182 +183 # bigquery udfs are case sensitive +184 NORMALIZE_FUNCTIONS = False +185 +186 TIME_MAPPING = { +187 "%D": "%m/%d/%y", +188 } +189 +190 FORMAT_MAPPING = { +191 "DD": "%d", +192 "MM": "%m", +193 "MON": "%b", +194 "MONTH": "%B", +195 "YYYY": "%Y", +196 "YY": "%y", +197 "HH": "%I", +198 "HH12": "%I", +199 "HH24": "%H", +200 "MI": "%M", +201 "SS": "%S", +202 "SSSSS": "%f", +203 "TZH": "%z", +204 } +205 +206 @classmethod +207 def normalize_identifier(cls, expression: E) -> E: +208 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). +209 # The following check is essentially a heuristic to detect tables based on whether or +210 # not they're qualified. 
+211 if isinstance(expression, exp.Identifier): +212 parent = expression.parent +213 +214 while isinstance(parent, exp.Dot): +215 parent = parent.parent +216 +217 if ( +218 not isinstance(parent, exp.UserDefinedFunction) +219 and not (isinstance(parent, exp.Table) and parent.db) +220 and not expression.meta.get("is_table") +221 ): +222 expression.set("this", expression.this.lower()) +223 +224 return expression +225 +226 class Tokenizer(tokens.Tokenizer): +227 QUOTES = ["'", '"', '"""', "'''"] +228 COMMENTS = ["--", "#", ("/*", "*/")] +229 IDENTIFIERS = ["`"] +230 STRING_ESCAPES = ["\\"] +231 +232 HEX_STRINGS = [("0x", ""), ("0X", "")] +233 +234 BYTE_STRINGS = [ +235 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") +236 ] +237 +238 RAW_STRINGS = [ +239 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") +240 ] +241 +242 KEYWORDS = { +243 **tokens.Tokenizer.KEYWORDS, +244 "ANY TYPE": TokenType.VARIANT, +245 "BEGIN": TokenType.COMMAND, +246 "BEGIN TRANSACTION": TokenType.BEGIN, +247 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, +248 "BYTES": TokenType.BINARY, +249 "DECLARE": TokenType.COMMAND, +250 "FLOAT64": TokenType.DOUBLE, +251 "INT64": TokenType.BIGINT, +252 "RECORD": TokenType.STRUCT, +253 "TIMESTAMP": TokenType.TIMESTAMPTZ, +254 "NOT DETERMINISTIC": TokenType.VOLATILE, +255 "UNKNOWN": TokenType.NULL, +256 } +257 KEYWORDS.pop("DIV") +258 +259 class Parser(parser.Parser): +260 PREFIXED_PIVOT_COLUMNS = True +261 +262 LOG_BASE_FIRST = False +263 LOG_DEFAULTS_TO_LN = True +264 +265 FUNCTIONS = { +266 **parser.Parser.FUNCTIONS, +267 "DATE": _parse_date, +268 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), +269 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), +270 "DATE_TRUNC": lambda args: exp.DateTrunc( +271 unit=exp.Literal.string(str(seq_get(args, 1))), +272 this=seq_get(args, 0), +273 ), +274 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), +275 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), +276 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), +277 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, +278 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( +279 [seq_get(args, 1), seq_get(args, 0)] +280 ), +281 "PARSE_TIMESTAMP": _parse_timestamp, +282 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, +283 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +284 this=seq_get(args, 0), +285 expression=seq_get(args, 1), +286 position=seq_get(args, 2), +287 occurrence=seq_get(args, 3), +288 group=exp.Literal.number(1) +289 if re.compile(str(seq_get(args, 1))).groups == 1 +290 else None, +291 ), +292 "SPLIT": lambda args: exp.Split( +293 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split +294 this=seq_get(args, 0), +295 expression=seq_get(args, 1) or exp.Literal.string(","), +296 ), +297 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), +298 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), +299 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), +300 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), +301 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, +302 } 303 -304 NO_PAREN_FUNCTIONS = { -305 **parser.Parser.NO_PAREN_FUNCTIONS, -306 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, +304 FUNCTION_PARSERS = { +305 **parser.Parser.FUNCTION_PARSERS, +306 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 307 } -308 
-309 NESTED_TYPE_TOKENS = { -310 *parser.Parser.NESTED_TYPE_TOKENS, -311 TokenType.TABLE, -312 } -313 -314 ID_VAR_TOKENS = { -315 *parser.Parser.ID_VAR_TOKENS, -316 TokenType.VALUES, -317 } -318 -319 PROPERTY_PARSERS = { -320 **parser.Parser.PROPERTY_PARSERS, -321 "NOT DETERMINISTIC": lambda self: self.expression( -322 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") -323 ), -324 "OPTIONS": lambda self: self._parse_with_property(), -325 } -326 -327 CONSTRAINT_PARSERS = { -328 **parser.Parser.CONSTRAINT_PARSERS, -329 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), -330 } -331 -332 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: -333 this = super()._parse_table_part(schema=schema) -334 -335 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names -336 if isinstance(this, exp.Identifier): -337 table_name = this.name -338 while self._match(TokenType.DASH, advance=False) and self._next: -339 self._advance(2) -340 table_name += f"-{self._prev.text}" -341 -342 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) -343 -344 return this -345 -346 def _parse_table_parts(self, schema: bool = False) -> exp.Table: -347 table = super()._parse_table_parts(schema=schema) -348 if isinstance(table.this, exp.Identifier) and "." in table.name: -349 catalog, db, this, *rest = ( -350 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) -351 for x in split_num_words(table.name, ".", 3) -352 ) -353 -354 if rest and this: -355 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) -356 -357 table = exp.Table(this=this, db=db, catalog=catalog) -358 -359 return table -360 -361 class Generator(generator.Generator): -362 EXPLICIT_UNION = True -363 INTERVAL_ALLOWS_PLURAL_FORM = False -364 JOIN_HINTS = False -365 QUERY_HINTS = False -366 TABLE_HINTS = False -367 LIMIT_FETCH = "LIMIT" -368 RENAME_TABLE_WITH_DB = False -369 ESCAPE_LINE_BREAK = True -370 -371 TRANSFORMS = { -372 **generator.Generator.TRANSFORMS, -373 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -374 exp.ArraySize: rename_func("ARRAY_LENGTH"), -375 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), -376 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), -377 exp.DateAdd: _date_add_sql("DATE", "ADD"), -378 exp.DateSub: _date_add_sql("DATE", "SUB"), -379 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), -380 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), -381 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", -382 exp.DateStrToDate: datestrtodate_sql, -383 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), -384 exp.JSONFormat: rename_func("TO_JSON_STRING"), -385 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), -386 exp.GroupConcat: rename_func("STRING_AGG"), -387 exp.ILike: no_ilike_sql, -388 exp.IntDiv: rename_func("DIV"), -389 exp.Max: max_or_greatest, -390 exp.Min: min_or_least, -391 exp.RegexpExtract: lambda self, e: self.func( -392 "REGEXP_EXTRACT", -393 e.this, -394 e.expression, -395 e.args.get("position"), -396 e.args.get("occurrence"), -397 ), -398 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), -399 exp.Select: transforms.preprocess( -400 [ -401 transforms.explode_to_unnest, -402 _unqualify_unnest, -403 transforms.eliminate_distinct_on, -404 _alias_ordered_group, -405 ] -406 ), -407 exp.StrToDate: lambda self, e: 
f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", -408 exp.StrToTime: lambda self, e: self.func( -409 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") -410 ), -411 exp.TimeAdd: _date_add_sql("TIME", "ADD"), -412 exp.TimeSub: _date_add_sql("TIME", "SUB"), -413 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), -414 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), -415 exp.TimeStrToTime: timestrtotime_sql, -416 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), -417 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), -418 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -419 exp.VariancePop: rename_func("VAR_POP"), -420 exp.Values: _derived_table_values_to_unnest, -421 exp.ReturnsProperty: _returnsproperty_sql, -422 exp.Create: _create_sql, -423 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), -424 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" -425 if e.name == "IMMUTABLE" -426 else "NOT DETERMINISTIC", -427 } -428 -429 TYPE_MAPPING = { -430 **generator.Generator.TYPE_MAPPING, -431 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", -432 exp.DataType.Type.BIGINT: "INT64", -433 exp.DataType.Type.BINARY: "BYTES", -434 exp.DataType.Type.BOOLEAN: "BOOL", -435 exp.DataType.Type.CHAR: "STRING", -436 exp.DataType.Type.DECIMAL: "NUMERIC", -437 exp.DataType.Type.DOUBLE: "FLOAT64", -438 exp.DataType.Type.FLOAT: "FLOAT64", -439 exp.DataType.Type.INT: "INT64", -440 exp.DataType.Type.NCHAR: "STRING", -441 exp.DataType.Type.NVARCHAR: "STRING", -442 exp.DataType.Type.SMALLINT: "INT64", -443 exp.DataType.Type.TEXT: "STRING", -444 exp.DataType.Type.TIMESTAMP: "DATETIME", -445 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -446 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -447 exp.DataType.Type.TINYINT: "INT64", -448 exp.DataType.Type.VARBINARY: "BYTES", -449 exp.DataType.Type.VARCHAR: "STRING", -450 exp.DataType.Type.VARIANT: "ANY TYPE", -451 } -452 -453 PROPERTIES_LOCATION = { -454 **generator.Generator.PROPERTIES_LOCATION, -455 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -456 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -457 } -458 -459 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords -460 RESERVED_KEYWORDS = { -461 *generator.Generator.RESERVED_KEYWORDS, -462 "all", -463 "and", -464 "any", -465 "array", -466 "as", -467 "asc", -468 "assert_rows_modified", -469 "at", -470 "between", -471 "by", -472 "case", -473 "cast", -474 "collate", -475 "contains", -476 "create", -477 "cross", -478 "cube", -479 "current", -480 "default", -481 "define", -482 "desc", -483 "distinct", -484 "else", -485 "end", -486 "enum", -487 "escape", -488 "except", -489 "exclude", -490 "exists", -491 "extract", -492 "false", -493 "fetch", -494 "following", -495 "for", -496 "from", -497 "full", -498 "group", -499 "grouping", -500 "groups", -501 "hash", -502 "having", -503 "if", -504 "ignore", -505 "in", -506 "inner", -507 "intersect", -508 "interval", -509 "into", -510 "is", -511 "join", -512 "lateral", -513 "left", -514 "like", -515 "limit", -516 "lookup", -517 "merge", -518 "natural", -519 "new", -520 "no", -521 "not", -522 "null", -523 "nulls", -524 "of", -525 "on", -526 "or", -527 "order", -528 "outer", -529 "over", -530 "partition", -531 "preceding", -532 "proto", -533 "qualify", -534 "range", -535 "recursive", -536 "respect", -537 "right", -538 "rollup", -539 "rows", -540 "select", -541 "set", -542 "some", -543 "struct", -544 "tablesample", -545 "then", -546 "to", -547 
"treat", -548 "true", -549 "unbounded", -550 "union", -551 "unnest", -552 "using", -553 "when", -554 "where", -555 "window", -556 "with", -557 "within", -558 } -559 -560 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: -561 parent = expression.parent -562 -563 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). -564 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. -565 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): -566 return self.func( -567 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) -568 ) +308 FUNCTION_PARSERS.pop("TRIM") +309 +310 NO_PAREN_FUNCTIONS = { +311 **parser.Parser.NO_PAREN_FUNCTIONS, +312 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, +313 } +314 +315 NESTED_TYPE_TOKENS = { +316 *parser.Parser.NESTED_TYPE_TOKENS, +317 TokenType.TABLE, +318 } +319 +320 ID_VAR_TOKENS = { +321 *parser.Parser.ID_VAR_TOKENS, +322 TokenType.VALUES, +323 } +324 +325 PROPERTY_PARSERS = { +326 **parser.Parser.PROPERTY_PARSERS, +327 "NOT DETERMINISTIC": lambda self: self.expression( +328 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") +329 ), +330 "OPTIONS": lambda self: self._parse_with_property(), +331 } +332 +333 CONSTRAINT_PARSERS = { +334 **parser.Parser.CONSTRAINT_PARSERS, +335 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), +336 } +337 +338 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: +339 this = super()._parse_table_part(schema=schema) +340 +341 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names +342 if isinstance(this, exp.Identifier): +343 table_name = this.name +344 while self._match(TokenType.DASH, advance=False) and self._next: +345 self._advance(2) +346 table_name += f"-{self._prev.text}" +347 +348 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) +349 +350 return this +351 +352 def _parse_table_parts(self, schema: bool = False) -> exp.Table: +353 table = super()._parse_table_parts(schema=schema) +354 if isinstance(table.this, exp.Identifier) and "." 
in table.name: +355 catalog, db, this, *rest = ( +356 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) +357 for x in split_num_words(table.name, ".", 3) +358 ) +359 +360 if rest and this: +361 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +362 +363 table = exp.Table(this=this, db=db, catalog=catalog) +364 +365 return table +366 +367 class Generator(generator.Generator): +368 EXPLICIT_UNION = True +369 INTERVAL_ALLOWS_PLURAL_FORM = False +370 JOIN_HINTS = False +371 QUERY_HINTS = False +372 TABLE_HINTS = False +373 LIMIT_FETCH = "LIMIT" +374 RENAME_TABLE_WITH_DB = False +375 ESCAPE_LINE_BREAK = True +376 +377 TRANSFORMS = { +378 **generator.Generator.TRANSFORMS, +379 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +380 exp.ArraySize: rename_func("ARRAY_LENGTH"), +381 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), +382 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), +383 exp.DateAdd: _date_add_sql("DATE", "ADD"), +384 exp.DateFromParts: rename_func("DATE"), +385 exp.DateSub: _date_add_sql("DATE", "SUB"), +386 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), +387 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), +388 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", +389 exp.DateStrToDate: datestrtodate_sql, +390 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), +391 exp.JSONFormat: rename_func("TO_JSON_STRING"), +392 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), +393 exp.GroupConcat: rename_func("STRING_AGG"), +394 exp.ILike: no_ilike_sql, +395 exp.IntDiv: rename_func("DIV"), +396 exp.Max: max_or_greatest, +397 exp.Min: min_or_least, +398 exp.RegexpExtract: lambda self, e: self.func( +399 "REGEXP_EXTRACT", +400 e.this, +401 e.expression, +402 e.args.get("position"), +403 e.args.get("occurrence"), +404 ), +405 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), +406 exp.Select: transforms.preprocess( +407 [ +408 transforms.explode_to_unnest, +409 _unqualify_unnest, +410 transforms.eliminate_distinct_on, +411 _alias_ordered_group, +412 ] +413 ), +414 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", +415 exp.StrToTime: lambda self, e: self.func( +416 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") +417 ), +418 exp.TimeAdd: _date_add_sql("TIME", "ADD"), +419 exp.TimeSub: _date_add_sql("TIME", "SUB"), +420 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), +421 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), +422 exp.TimeStrToTime: timestrtotime_sql, +423 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), +424 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), +425 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +426 exp.VariancePop: rename_func("VAR_POP"), +427 exp.Values: _derived_table_values_to_unnest, +428 exp.ReturnsProperty: _returnsproperty_sql, +429 exp.Create: _create_sql, +430 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), +431 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" +432 if e.name == "IMMUTABLE" +433 else "NOT DETERMINISTIC", +434 } +435 +436 TYPE_MAPPING = { +437 **generator.Generator.TYPE_MAPPING, +438 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", +439 exp.DataType.Type.BIGINT: "INT64", +440 exp.DataType.Type.BINARY: "BYTES", +441 exp.DataType.Type.BOOLEAN: "BOOL", +442 exp.DataType.Type.CHAR: "STRING", +443 exp.DataType.Type.DECIMAL: "NUMERIC", 
+444 exp.DataType.Type.DOUBLE: "FLOAT64", +445 exp.DataType.Type.FLOAT: "FLOAT64", +446 exp.DataType.Type.INT: "INT64", +447 exp.DataType.Type.NCHAR: "STRING", +448 exp.DataType.Type.NVARCHAR: "STRING", +449 exp.DataType.Type.SMALLINT: "INT64", +450 exp.DataType.Type.TEXT: "STRING", +451 exp.DataType.Type.TIMESTAMP: "DATETIME", +452 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +453 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +454 exp.DataType.Type.TINYINT: "INT64", +455 exp.DataType.Type.VARBINARY: "BYTES", +456 exp.DataType.Type.VARCHAR: "STRING", +457 exp.DataType.Type.VARIANT: "ANY TYPE", +458 } +459 +460 PROPERTIES_LOCATION = { +461 **generator.Generator.PROPERTIES_LOCATION, +462 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +463 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +464 } +465 +466 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords +467 RESERVED_KEYWORDS = { +468 *generator.Generator.RESERVED_KEYWORDS, +469 "all", +470 "and", +471 "any", +472 "array", +473 "as", +474 "asc", +475 "assert_rows_modified", +476 "at", +477 "between", +478 "by", +479 "case", +480 "cast", +481 "collate", +482 "contains", +483 "create", +484 "cross", +485 "cube", +486 "current", +487 "default", +488 "define", +489 "desc", +490 "distinct", +491 "else", +492 "end", +493 "enum", +494 "escape", +495 "except", +496 "exclude", +497 "exists", +498 "extract", +499 "false", +500 "fetch", +501 "following", +502 "for", +503 "from", +504 "full", +505 "group", +506 "grouping", +507 "groups", +508 "hash", +509 "having", +510 "if", +511 "ignore", +512 "in", +513 "inner", +514 "intersect", +515 "interval", +516 "into", +517 "is", +518 "join", +519 "lateral", +520 "left", +521 "like", +522 "limit", +523 "lookup", +524 "merge", +525 "natural", +526 "new", +527 "no", +528 "not", +529 "null", +530 "nulls", +531 "of", +532 "on", +533 "or", +534 "order", +535 "outer", +536 "over", +537 "partition", +538 "preceding", +539 "proto", +540 "qualify", +541 "range", +542 "recursive", +543 "respect", +544 "right", +545 "rollup", +546 "rows", +547 "select", +548 "set", +549 "some", +550 "struct", +551 "tablesample", +552 "then", +553 "to", +554 "treat", +555 "true", +556 "unbounded", +557 "union", +558 "unnest", +559 "using", +560 "when", +561 "where", +562 "window", +563 "with", +564 "within", +565 } +566 +567 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: +568 parent = expression.parent 569 -570 return super().attimezone_sql(expression) -571 -572 def trycast_sql(self, expression: exp.TryCast) -> str: -573 return self.cast_sql(expression, safe_prefix="SAFE_") -574 -575 def cte_sql(self, expression: exp.CTE) -> str: -576 if expression.alias_column_names: -577 self.unsupported("Column names in CTE definition are not supported.") -578 return super().cte_sql(expression) -579 -580 def array_sql(self, expression: exp.Array) -> str: -581 first_arg = seq_get(expression.expressions, 0) -582 if isinstance(first_arg, exp.Subqueryable): -583 return f"ARRAY{self.wrap(self.sql(first_arg))}" -584 -585 return inline_array_sql(self, expression) +570 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). +571 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
+572 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): +573 return self.func( +574 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) +575 ) +576 +577 return super().attimezone_sql(expression) +578 +579 def trycast_sql(self, expression: exp.TryCast) -> str: +580 return self.cast_sql(expression, safe_prefix="SAFE_") +581 +582 def cte_sql(self, expression: exp.CTE) -> str: +583 if expression.alias_column_names: +584 self.unsupported("Column names in CTE definition are not supported.") +585 return super().cte_sql(expression) 586 -587 def transaction_sql(self, *_) -> str: -588 return "BEGIN TRANSACTION" -589 -590 def commit_sql(self, *_) -> str: -591 return "COMMIT TRANSACTION" -592 -593 def rollback_sql(self, *_) -> str: -594 return "ROLLBACK TRANSACTION" -595 -596 def in_unnest_op(self, expression: exp.Unnest) -> str: -597 return self.sql(expression) -598 -599 def except_op(self, expression: exp.Except) -> str: -600 if not expression.args.get("distinct", False): -601 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") -602 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -603 -604 def intersect_op(self, expression: exp.Intersect) -> str: -605 if not expression.args.get("distinct", False): -606 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") -607 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -608 -609 def with_properties(self, properties: exp.Properties) -> str: -610 return self.properties(properties, prefix=self.seg("OPTIONS")) +587 def array_sql(self, expression: exp.Array) -> str: +588 first_arg = seq_get(expression.expressions, 0) +589 if isinstance(first_arg, exp.Subqueryable): +590 return f"ARRAY{self.wrap(self.sql(first_arg))}" +591 +592 return inline_array_sql(self, expression) +593 +594 def transaction_sql(self, *_) -> str: +595 return "BEGIN TRANSACTION" +596 +597 def commit_sql(self, *_) -> str: +598 return "COMMIT TRANSACTION" +599 +600 def rollback_sql(self, *_) -> str: +601 return "ROLLBACK TRANSACTION" +602 +603 def in_unnest_op(self, expression: exp.Unnest) -> str: +604 return self.sql(expression) +605 +606 def except_op(self, expression: exp.Except) -> str: +607 if not expression.args.get("distinct", False): +608 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") +609 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +610 +611 def intersect_op(self, expression: exp.Intersect) -> str: +612 if not expression.args.get("distinct", False): +613 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") +614 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +615 +616 def with_properties(self, properties: exp.Properties) -> str: +617 return self.properties(properties, prefix=self.seg("OPTIONS")) @@ -981,445 +988,447 @@ -
173class BigQuery(Dialect):
-174    UNNEST_COLUMN_ONLY = True
-175
-176    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
-177    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
-178
-179    # bigquery udfs are case sensitive
-180    NORMALIZE_FUNCTIONS = False
-181
-182    TIME_MAPPING = {
-183        "%D": "%m/%d/%y",
-184    }
-185
-186    FORMAT_MAPPING = {
-187        "DD": "%d",
-188        "MM": "%m",
-189        "MON": "%b",
-190        "MONTH": "%B",
-191        "YYYY": "%Y",
-192        "YY": "%y",
-193        "HH": "%I",
-194        "HH12": "%I",
-195        "HH24": "%H",
-196        "MI": "%M",
-197        "SS": "%S",
-198        "SSSSS": "%f",
-199        "TZH": "%z",
-200    }
-201
-202    @classmethod
-203    def normalize_identifier(cls, expression: E) -> E:
-204        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
-205        # The following check is essentially a heuristic to detect tables based on whether or
-206        # not they're qualified.
-207        if isinstance(expression, exp.Identifier):
-208            parent = expression.parent
-209
-210            while isinstance(parent, exp.Dot):
-211                parent = parent.parent
-212
-213            if (
-214                not isinstance(parent, exp.UserDefinedFunction)
-215                and not (isinstance(parent, exp.Table) and parent.db)
-216                and not expression.meta.get("is_table")
-217            ):
-218                expression.set("this", expression.this.lower())
-219
-220        return expression
-221
-222    class Tokenizer(tokens.Tokenizer):
-223        QUOTES = ["'", '"', '"""', "'''"]
-224        COMMENTS = ["--", "#", ("/*", "*/")]
-225        IDENTIFIERS = ["`"]
-226        STRING_ESCAPES = ["\\"]
-227
-228        HEX_STRINGS = [("0x", ""), ("0X", "")]
-229
-230        BYTE_STRINGS = [
-231            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
-232        ]
-233
-234        RAW_STRINGS = [
-235            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
-236        ]
-237
-238        KEYWORDS = {
-239            **tokens.Tokenizer.KEYWORDS,
-240            "ANY TYPE": TokenType.VARIANT,
-241            "BEGIN": TokenType.COMMAND,
-242            "BEGIN TRANSACTION": TokenType.BEGIN,
-243            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
-244            "BYTES": TokenType.BINARY,
-245            "DECLARE": TokenType.COMMAND,
-246            "FLOAT64": TokenType.DOUBLE,
-247            "INT64": TokenType.BIGINT,
-248            "RECORD": TokenType.STRUCT,
-249            "TIMESTAMP": TokenType.TIMESTAMPTZ,
-250            "NOT DETERMINISTIC": TokenType.VOLATILE,
-251            "UNKNOWN": TokenType.NULL,
-252        }
-253        KEYWORDS.pop("DIV")
-254
-255    class Parser(parser.Parser):
-256        PREFIXED_PIVOT_COLUMNS = True
-257
-258        LOG_BASE_FIRST = False
-259        LOG_DEFAULTS_TO_LN = True
-260
-261        FUNCTIONS = {
-262            **parser.Parser.FUNCTIONS,
-263            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
-264            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
-265            "DATE_TRUNC": lambda args: exp.DateTrunc(
-266                unit=exp.Literal.string(str(seq_get(args, 1))),
-267                this=seq_get(args, 0),
-268            ),
-269            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
-270            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
-271            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
-272            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
-273            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
-274                [seq_get(args, 1), seq_get(args, 0)]
-275            ),
-276            "PARSE_TIMESTAMP": _parse_timestamp,
-277            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
-278            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
-279                this=seq_get(args, 0),
-280                expression=seq_get(args, 1),
-281                position=seq_get(args, 2),
-282                occurrence=seq_get(args, 3),
-283                group=exp.Literal.number(1)
-284                if re.compile(str(seq_get(args, 1))).groups == 1
-285                else None,
-286            ),
-287            "SPLIT": lambda args: exp.Split(
-288                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
-289                this=seq_get(args, 0),
-290                expression=seq_get(args, 1) or exp.Literal.string(","),
-291            ),
-292            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
-293            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
-294            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
-295            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
-296            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
-297        }
-298
-299        FUNCTION_PARSERS = {
-300            **parser.Parser.FUNCTION_PARSERS,
-301            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
-302        }
-303        FUNCTION_PARSERS.pop("TRIM")
+            178class BigQuery(Dialect):
+179    UNNEST_COLUMN_ONLY = True
+180
+181    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
+182    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
+183
+184    # bigquery udfs are case sensitive
+185    NORMALIZE_FUNCTIONS = False
+186
+187    TIME_MAPPING = {
+188        "%D": "%m/%d/%y",
+189    }
+190
+191    FORMAT_MAPPING = {
+192        "DD": "%d",
+193        "MM": "%m",
+194        "MON": "%b",
+195        "MONTH": "%B",
+196        "YYYY": "%Y",
+197        "YY": "%y",
+198        "HH": "%I",
+199        "HH12": "%I",
+200        "HH24": "%H",
+201        "MI": "%M",
+202        "SS": "%S",
+203        "SSSSS": "%f",
+204        "TZH": "%z",
+205    }
+206
+207    @classmethod
+208    def normalize_identifier(cls, expression: E) -> E:
+209        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
+210        # The following check is essentially a heuristic to detect tables based on whether or
+211        # not they're qualified.
+212        if isinstance(expression, exp.Identifier):
+213            parent = expression.parent
+214
+215            while isinstance(parent, exp.Dot):
+216                parent = parent.parent
+217
+218            if (
+219                not isinstance(parent, exp.UserDefinedFunction)
+220                and not (isinstance(parent, exp.Table) and parent.db)
+221                and not expression.meta.get("is_table")
+222            ):
+223                expression.set("this", expression.this.lower())
+224
+225        return expression
+226
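(Illustrative aside, not part of the upstream patch: the normalize_identifier heuristic listed above lowercases unqualified identifiers such as CTE names, but preserves the casing of identifiers that belong to a qualified table. A minimal sketch, assuming a sqlglot 17.x install:)

    from sqlglot import exp
    from sqlglot.dialects.bigquery import BigQuery

    # Unqualified identifiers are treated as case-insensitive and lowercased.
    print(BigQuery.normalize_identifier(exp.to_identifier("MyCte")).name)   # mycte

    # An identifier whose parent Table carries a db part looks like a real table
    # name to the heuristic, so its casing is left alone.
    qualified = exp.to_table("MyDataset.MyTable")
    print(BigQuery.normalize_identifier(qualified.this).name)               # MyTable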
+227    class Tokenizer(tokens.Tokenizer):
+228        QUOTES = ["'", '"', '"""', "'''"]
+229        COMMENTS = ["--", "#", ("/*", "*/")]
+230        IDENTIFIERS = ["`"]
+231        STRING_ESCAPES = ["\\"]
+232
+233        HEX_STRINGS = [("0x", ""), ("0X", "")]
+234
+235        BYTE_STRINGS = [
+236            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
+237        ]
+238
+239        RAW_STRINGS = [
+240            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
+241        ]
+242
+243        KEYWORDS = {
+244            **tokens.Tokenizer.KEYWORDS,
+245            "ANY TYPE": TokenType.VARIANT,
+246            "BEGIN": TokenType.COMMAND,
+247            "BEGIN TRANSACTION": TokenType.BEGIN,
+248            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+249            "BYTES": TokenType.BINARY,
+250            "DECLARE": TokenType.COMMAND,
+251            "FLOAT64": TokenType.DOUBLE,
+252            "INT64": TokenType.BIGINT,
+253            "RECORD": TokenType.STRUCT,
+254            "TIMESTAMP": TokenType.TIMESTAMPTZ,
+255            "NOT DETERMINISTIC": TokenType.VOLATILE,
+256            "UNKNOWN": TokenType.NULL,
+257        }
+258        KEYWORDS.pop("DIV")
+259
+260    class Parser(parser.Parser):
+261        PREFIXED_PIVOT_COLUMNS = True
+262
+263        LOG_BASE_FIRST = False
+264        LOG_DEFAULTS_TO_LN = True
+265
+266        FUNCTIONS = {
+267            **parser.Parser.FUNCTIONS,
+268            "DATE": _parse_date,
+269            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
+270            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
+271            "DATE_TRUNC": lambda args: exp.DateTrunc(
+272                unit=exp.Literal.string(str(seq_get(args, 1))),
+273                this=seq_get(args, 0),
+274            ),
+275            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
+276            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
+277            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+278            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
+279            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
+280                [seq_get(args, 1), seq_get(args, 0)]
+281            ),
+282            "PARSE_TIMESTAMP": _parse_timestamp,
+283            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+284            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
+285                this=seq_get(args, 0),
+286                expression=seq_get(args, 1),
+287                position=seq_get(args, 2),
+288                occurrence=seq_get(args, 3),
+289                group=exp.Literal.number(1)
+290                if re.compile(str(seq_get(args, 1))).groups == 1
+291                else None,
+292            ),
+293            "SPLIT": lambda args: exp.Split(
+294                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
+295                this=seq_get(args, 0),
+296                expression=seq_get(args, 1) or exp.Literal.string(","),
+297            ),
+298            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
+299            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
+300            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
+301            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
+302            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
+303        }
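(Illustrative sketch only, not in the patch: the new "DATE": _parse_date entry above routes a three-argument DATE(...) call to exp.DateFromParts while the single-argument form still becomes exp.Date, and the matching generator entry exp.DateFromParts: rename_func("DATE") writes it back out. Assuming sqlglot >= 17.3 is installed:)

    import sqlglot
    from sqlglot import exp

    three = sqlglot.parse_one("SELECT DATE(2023, 7, 10)", read="bigquery")
    one = sqlglot.parse_one("SELECT DATE('2023-07-10')", read="bigquery")

    print(three.find(exp.DateFromParts) is not None)   # True: built from year/month/day parts
    print(one.find(exp.Date) is not None)              # True: plain one-argument DATE()
    print(three.sql(dialect="bigquery"))               # round-trips as SELECT DATE(2023, 7, 10)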
 304
-305        NO_PAREN_FUNCTIONS = {
-306            **parser.Parser.NO_PAREN_FUNCTIONS,
-307            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+305        FUNCTION_PARSERS = {
+306            **parser.Parser.FUNCTION_PARSERS,
+307            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
 308        }
-309
-310        NESTED_TYPE_TOKENS = {
-311            *parser.Parser.NESTED_TYPE_TOKENS,
-312            TokenType.TABLE,
-313        }
-314
-315        ID_VAR_TOKENS = {
-316            *parser.Parser.ID_VAR_TOKENS,
-317            TokenType.VALUES,
-318        }
-319
-320        PROPERTY_PARSERS = {
-321            **parser.Parser.PROPERTY_PARSERS,
-322            "NOT DETERMINISTIC": lambda self: self.expression(
-323                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
-324            ),
-325            "OPTIONS": lambda self: self._parse_with_property(),
-326        }
-327
-328        CONSTRAINT_PARSERS = {
-329            **parser.Parser.CONSTRAINT_PARSERS,
-330            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
-331        }
-332
-333        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
-334            this = super()._parse_table_part(schema=schema)
-335
-336            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
-337            if isinstance(this, exp.Identifier):
-338                table_name = this.name
-339                while self._match(TokenType.DASH, advance=False) and self._next:
-340                    self._advance(2)
-341                    table_name += f"-{self._prev.text}"
-342
-343                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
-344
-345            return this
-346
-347        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
-348            table = super()._parse_table_parts(schema=schema)
-349            if isinstance(table.this, exp.Identifier) and "." in table.name:
-350                catalog, db, this, *rest = (
-351                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
-352                    for x in split_num_words(table.name, ".", 3)
-353                )
-354
-355                if rest and this:
-356                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
-357
-358                table = exp.Table(this=this, db=db, catalog=catalog)
-359
-360            return table
-361
-362    class Generator(generator.Generator):
-363        EXPLICIT_UNION = True
-364        INTERVAL_ALLOWS_PLURAL_FORM = False
-365        JOIN_HINTS = False
-366        QUERY_HINTS = False
-367        TABLE_HINTS = False
-368        LIMIT_FETCH = "LIMIT"
-369        RENAME_TABLE_WITH_DB = False
-370        ESCAPE_LINE_BREAK = True
-371
-372        TRANSFORMS = {
-373            **generator.Generator.TRANSFORMS,
-374            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
-375            exp.ArraySize: rename_func("ARRAY_LENGTH"),
-376            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
-377            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
-378            exp.DateAdd: _date_add_sql("DATE", "ADD"),
-379            exp.DateSub: _date_add_sql("DATE", "SUB"),
-380            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
-381            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
-382            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
-383            exp.DateStrToDate: datestrtodate_sql,
-384            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
-385            exp.JSONFormat: rename_func("TO_JSON_STRING"),
-386            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
-387            exp.GroupConcat: rename_func("STRING_AGG"),
-388            exp.ILike: no_ilike_sql,
-389            exp.IntDiv: rename_func("DIV"),
-390            exp.Max: max_or_greatest,
-391            exp.Min: min_or_least,
-392            exp.RegexpExtract: lambda self, e: self.func(
-393                "REGEXP_EXTRACT",
-394                e.this,
-395                e.expression,
-396                e.args.get("position"),
-397                e.args.get("occurrence"),
-398            ),
-399            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
-400            exp.Select: transforms.preprocess(
-401                [
-402                    transforms.explode_to_unnest,
-403                    _unqualify_unnest,
-404                    transforms.eliminate_distinct_on,
-405                    _alias_ordered_group,
-406                ]
-407            ),
-408            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
-409            exp.StrToTime: lambda self, e: self.func(
-410                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
-411            ),
-412            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
-413            exp.TimeSub: _date_add_sql("TIME", "SUB"),
-414            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
-415            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
-416            exp.TimeStrToTime: timestrtotime_sql,
-417            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
-418            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
-419            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
-420            exp.VariancePop: rename_func("VAR_POP"),
-421            exp.Values: _derived_table_values_to_unnest,
-422            exp.ReturnsProperty: _returnsproperty_sql,
-423            exp.Create: _create_sql,
-424            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
-425            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
-426            if e.name == "IMMUTABLE"
-427            else "NOT DETERMINISTIC",
-428        }
-429
-430        TYPE_MAPPING = {
-431            **generator.Generator.TYPE_MAPPING,
-432            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
-433            exp.DataType.Type.BIGINT: "INT64",
-434            exp.DataType.Type.BINARY: "BYTES",
-435            exp.DataType.Type.BOOLEAN: "BOOL",
-436            exp.DataType.Type.CHAR: "STRING",
-437            exp.DataType.Type.DECIMAL: "NUMERIC",
-438            exp.DataType.Type.DOUBLE: "FLOAT64",
-439            exp.DataType.Type.FLOAT: "FLOAT64",
-440            exp.DataType.Type.INT: "INT64",
-441            exp.DataType.Type.NCHAR: "STRING",
-442            exp.DataType.Type.NVARCHAR: "STRING",
-443            exp.DataType.Type.SMALLINT: "INT64",
-444            exp.DataType.Type.TEXT: "STRING",
-445            exp.DataType.Type.TIMESTAMP: "DATETIME",
-446            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
-447            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
-448            exp.DataType.Type.TINYINT: "INT64",
-449            exp.DataType.Type.VARBINARY: "BYTES",
-450            exp.DataType.Type.VARCHAR: "STRING",
-451            exp.DataType.Type.VARIANT: "ANY TYPE",
-452        }
-453
-454        PROPERTIES_LOCATION = {
-455            **generator.Generator.PROPERTIES_LOCATION,
-456            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
-457            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-458        }
-459
-460        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
-461        RESERVED_KEYWORDS = {
-462            *generator.Generator.RESERVED_KEYWORDS,
-463            "all",
-464            "and",
-465            "any",
-466            "array",
-467            "as",
-468            "asc",
-469            "assert_rows_modified",
-470            "at",
-471            "between",
-472            "by",
-473            "case",
-474            "cast",
-475            "collate",
-476            "contains",
-477            "create",
-478            "cross",
-479            "cube",
-480            "current",
-481            "default",
-482            "define",
-483            "desc",
-484            "distinct",
-485            "else",
-486            "end",
-487            "enum",
-488            "escape",
-489            "except",
-490            "exclude",
-491            "exists",
-492            "extract",
-493            "false",
-494            "fetch",
-495            "following",
-496            "for",
-497            "from",
-498            "full",
-499            "group",
-500            "grouping",
-501            "groups",
-502            "hash",
-503            "having",
-504            "if",
-505            "ignore",
-506            "in",
-507            "inner",
-508            "intersect",
-509            "interval",
-510            "into",
-511            "is",
-512            "join",
-513            "lateral",
-514            "left",
-515            "like",
-516            "limit",
-517            "lookup",
-518            "merge",
-519            "natural",
-520            "new",
-521            "no",
-522            "not",
-523            "null",
-524            "nulls",
-525            "of",
-526            "on",
-527            "or",
-528            "order",
-529            "outer",
-530            "over",
-531            "partition",
-532            "preceding",
-533            "proto",
-534            "qualify",
-535            "range",
-536            "recursive",
-537            "respect",
-538            "right",
-539            "rollup",
-540            "rows",
-541            "select",
-542            "set",
-543            "some",
-544            "struct",
-545            "tablesample",
-546            "then",
-547            "to",
-548            "treat",
-549            "true",
-550            "unbounded",
-551            "union",
-552            "unnest",
-553            "using",
-554            "when",
-555            "where",
-556            "window",
-557            "with",
-558            "within",
-559        }
-560
-561        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
-562            parent = expression.parent
-563
-564            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
-565            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
-566            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
-567                return self.func(
-568                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
-569                )
+309        FUNCTION_PARSERS.pop("TRIM")
+310
+311        NO_PAREN_FUNCTIONS = {
+312            **parser.Parser.NO_PAREN_FUNCTIONS,
+313            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+314        }
+315
+316        NESTED_TYPE_TOKENS = {
+317            *parser.Parser.NESTED_TYPE_TOKENS,
+318            TokenType.TABLE,
+319        }
+320
+321        ID_VAR_TOKENS = {
+322            *parser.Parser.ID_VAR_TOKENS,
+323            TokenType.VALUES,
+324        }
+325
+326        PROPERTY_PARSERS = {
+327            **parser.Parser.PROPERTY_PARSERS,
+328            "NOT DETERMINISTIC": lambda self: self.expression(
+329                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
+330            ),
+331            "OPTIONS": lambda self: self._parse_with_property(),
+332        }
+333
+334        CONSTRAINT_PARSERS = {
+335            **parser.Parser.CONSTRAINT_PARSERS,
+336            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
+337        }
+338
+339        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
+340            this = super()._parse_table_part(schema=schema)
+341
+342            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
+343            if isinstance(this, exp.Identifier):
+344                table_name = this.name
+345                while self._match(TokenType.DASH, advance=False) and self._next:
+346                    self._advance(2)
+347                    table_name += f"-{self._prev.text}"
+348
+349                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
+350
+351            return this
+352
+353        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
+354            table = super()._parse_table_parts(schema=schema)
+355            if isinstance(table.this, exp.Identifier) and "." in table.name:
+356                catalog, db, this, *rest = (
+357                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
+358                    for x in split_num_words(table.name, ".", 3)
+359                )
+360
+361                if rest and this:
+362                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
+363
+364                table = exp.Table(this=this, db=db, catalog=catalog)
+365
+366            return table
+367
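(Again purely as an illustration, assuming sqlglot is installed: _parse_table_part and _parse_table_parts above are what let BigQuery's dash-separated project ids and fully dotted names inside a single pair of backticks round-trip; exact quoting in the output may vary by version:)

    import sqlglot
    from sqlglot import exp

    # One backtick-quoted string containing dots is split into catalog / db / table parts.
    q = sqlglot.parse_one("SELECT * FROM `my-project.my_dataset.my_table`", read="bigquery")
    print(q.find(exp.Table).sql(dialect="bigquery"))

    # An unquoted, dash-separated project id is re-joined into a single identifier part.
    d = sqlglot.parse_one("SELECT * FROM my-project.my_dataset.my_table", read="bigquery")
    print(d.find(exp.Table).sql(dialect="bigquery"))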
+368    class Generator(generator.Generator):
+369        EXPLICIT_UNION = True
+370        INTERVAL_ALLOWS_PLURAL_FORM = False
+371        JOIN_HINTS = False
+372        QUERY_HINTS = False
+373        TABLE_HINTS = False
+374        LIMIT_FETCH = "LIMIT"
+375        RENAME_TABLE_WITH_DB = False
+376        ESCAPE_LINE_BREAK = True
+377
+378        TRANSFORMS = {
+379            **generator.Generator.TRANSFORMS,
+380            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+381            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+382            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
+383            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
+384            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+385            exp.DateFromParts: rename_func("DATE"),
+386            exp.DateSub: _date_add_sql("DATE", "SUB"),
+387            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+388            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+389            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+390            exp.DateStrToDate: datestrtodate_sql,
+391            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
+392            exp.JSONFormat: rename_func("TO_JSON_STRING"),
+393            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
+394            exp.GroupConcat: rename_func("STRING_AGG"),
+395            exp.ILike: no_ilike_sql,
+396            exp.IntDiv: rename_func("DIV"),
+397            exp.Max: max_or_greatest,
+398            exp.Min: min_or_least,
+399            exp.RegexpExtract: lambda self, e: self.func(
+400                "REGEXP_EXTRACT",
+401                e.this,
+402                e.expression,
+403                e.args.get("position"),
+404                e.args.get("occurrence"),
+405            ),
+406            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+407            exp.Select: transforms.preprocess(
+408                [
+409                    transforms.explode_to_unnest,
+410                    _unqualify_unnest,
+411                    transforms.eliminate_distinct_on,
+412                    _alias_ordered_group,
+413                ]
+414            ),
+415            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
+416            exp.StrToTime: lambda self, e: self.func(
+417                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
+418            ),
+419            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+420            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+421            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+422            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+423            exp.TimeStrToTime: timestrtotime_sql,
+424            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
+425            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
+426            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+427            exp.VariancePop: rename_func("VAR_POP"),
+428            exp.Values: _derived_table_values_to_unnest,
+429            exp.ReturnsProperty: _returnsproperty_sql,
+430            exp.Create: _create_sql,
+431            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
+432            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
+433            if e.name == "IMMUTABLE"
+434            else "NOT DETERMINISTIC",
+435        }
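(One more illustrative note, not part of the patch: the TRANSFORMS entries above map sqlglot's canonical nodes back to BigQuery spellings, e.g. exp.JSONFormat, produced by the parser's TO_JSON_STRING entry, is rendered as TO_JSON_STRING again:)

    import sqlglot
    from sqlglot import exp

    tree = sqlglot.parse_one("SELECT TO_JSON_STRING(t) FROM t", read="bigquery")
    print(tree.find(exp.JSONFormat) is not None)   # True: canonical JSON-format node
    print(tree.sql(dialect="bigquery"))            # SELECT TO_JSON_STRING(t) FROM t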
+436
+437        TYPE_MAPPING = {
+438            **generator.Generator.TYPE_MAPPING,
+439            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
+440            exp.DataType.Type.BIGINT: "INT64",
+441            exp.DataType.Type.BINARY: "BYTES",
+442            exp.DataType.Type.BOOLEAN: "BOOL",
+443            exp.DataType.Type.CHAR: "STRING",
+444            exp.DataType.Type.DECIMAL: "NUMERIC",
+445            exp.DataType.Type.DOUBLE: "FLOAT64",
+446            exp.DataType.Type.FLOAT: "FLOAT64",
+447            exp.DataType.Type.INT: "INT64",
+448            exp.DataType.Type.NCHAR: "STRING",
+449            exp.DataType.Type.NVARCHAR: "STRING",
+450            exp.DataType.Type.SMALLINT: "INT64",
+451            exp.DataType.Type.TEXT: "STRING",
+452            exp.DataType.Type.TIMESTAMP: "DATETIME",
+453            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+454            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+455            exp.DataType.Type.TINYINT: "INT64",
+456            exp.DataType.Type.VARBINARY: "BYTES",
+457            exp.DataType.Type.VARCHAR: "STRING",
+458            exp.DataType.Type.VARIANT: "ANY TYPE",
+459        }
+460
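Editorial aside (not part of the patch): TYPE_MAPPING drives the type names emitted in casts and DDL; a minimal sketch using the standard sqlglot.transpile entry point, with the expected shape of the output hedged in the comment.

import sqlglot

print(sqlglot.transpile("SELECT CAST(a AS BIGINT), CAST(b AS VARCHAR), CAST(c AS DOUBLE)", write="bigquery")[0])
# roughly: SELECT CAST(a AS INT64), CAST(b AS STRING), CAST(c AS FLOAT64)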
+461        PROPERTIES_LOCATION = {
+462            **generator.Generator.PROPERTIES_LOCATION,
+463            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
+464            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+465        }
+466
+467        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
+468        RESERVED_KEYWORDS = {
+469            *generator.Generator.RESERVED_KEYWORDS,
+470            "all",
+471            "and",
+472            "any",
+473            "array",
+474            "as",
+475            "asc",
+476            "assert_rows_modified",
+477            "at",
+478            "between",
+479            "by",
+480            "case",
+481            "cast",
+482            "collate",
+483            "contains",
+484            "create",
+485            "cross",
+486            "cube",
+487            "current",
+488            "default",
+489            "define",
+490            "desc",
+491            "distinct",
+492            "else",
+493            "end",
+494            "enum",
+495            "escape",
+496            "except",
+497            "exclude",
+498            "exists",
+499            "extract",
+500            "false",
+501            "fetch",
+502            "following",
+503            "for",
+504            "from",
+505            "full",
+506            "group",
+507            "grouping",
+508            "groups",
+509            "hash",
+510            "having",
+511            "if",
+512            "ignore",
+513            "in",
+514            "inner",
+515            "intersect",
+516            "interval",
+517            "into",
+518            "is",
+519            "join",
+520            "lateral",
+521            "left",
+522            "like",
+523            "limit",
+524            "lookup",
+525            "merge",
+526            "natural",
+527            "new",
+528            "no",
+529            "not",
+530            "null",
+531            "nulls",
+532            "of",
+533            "on",
+534            "or",
+535            "order",
+536            "outer",
+537            "over",
+538            "partition",
+539            "preceding",
+540            "proto",
+541            "qualify",
+542            "range",
+543            "recursive",
+544            "respect",
+545            "right",
+546            "rollup",
+547            "rows",
+548            "select",
+549            "set",
+550            "some",
+551            "struct",
+552            "tablesample",
+553            "then",
+554            "to",
+555            "treat",
+556            "true",
+557            "unbounded",
+558            "union",
+559            "unnest",
+560            "using",
+561            "when",
+562            "where",
+563            "window",
+564            "with",
+565            "within",
+566        }
+567
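Editorial aside (not part of the patch): the generator consults RESERVED_KEYWORDS when deciding whether an identifier needs backtick quoting; a hedged sketch of the expected effect.

import sqlglot

print(sqlglot.transpile('SELECT "select", "order" FROM "group"', read="postgres", write="bigquery")[0])
# expected, roughly: SELECT `select`, `order` FROM `group`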
+568        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
+569            parent = expression.parent
 570
-571            return super().attimezone_sql(expression)
-572
-573        def trycast_sql(self, expression: exp.TryCast) -> str:
-574            return self.cast_sql(expression, safe_prefix="SAFE_")
-575
-576        def cte_sql(self, expression: exp.CTE) -> str:
-577            if expression.alias_column_names:
-578                self.unsupported("Column names in CTE definition are not supported.")
-579            return super().cte_sql(expression)
-580
-581        def array_sql(self, expression: exp.Array) -> str:
-582            first_arg = seq_get(expression.expressions, 0)
-583            if isinstance(first_arg, exp.Subqueryable):
-584                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-585
-586            return inline_array_sql(self, expression)
+571            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
+572            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
+573            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
+574                return self.func(
+575                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
+576                )
+577
+578            return super().attimezone_sql(expression)
+579
+580        def trycast_sql(self, expression: exp.TryCast) -> str:
+581            return self.cast_sql(expression, safe_prefix="SAFE_")
+582
+583        def cte_sql(self, expression: exp.CTE) -> str:
+584            if expression.alias_column_names:
+585                self.unsupported("Column names in CTE definition are not supported.")
+586            return super().cte_sql(expression)
 587
-588        def transaction_sql(self, *_) -> str:
-589            return "BEGIN TRANSACTION"
-590
-591        def commit_sql(self, *_) -> str:
-592            return "COMMIT TRANSACTION"
-593
-594        def rollback_sql(self, *_) -> str:
-595            return "ROLLBACK TRANSACTION"
-596
-597        def in_unnest_op(self, expression: exp.Unnest) -> str:
-598            return self.sql(expression)
-599
-600        def except_op(self, expression: exp.Except) -> str:
-601            if not expression.args.get("distinct", False):
-602                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-603            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-604
-605        def intersect_op(self, expression: exp.Intersect) -> str:
-606            if not expression.args.get("distinct", False):
-607                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-608            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-609
-610        def with_properties(self, properties: exp.Properties) -> str:
-611            return self.properties(properties, prefix=self.seg("OPTIONS"))
+588        def array_sql(self, expression: exp.Array) -> str:
+589            first_arg = seq_get(expression.expressions, 0)
+590            if isinstance(first_arg, exp.Subqueryable):
+591                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+592
+593            return inline_array_sql(self, expression)
+594
+595        def transaction_sql(self, *_) -> str:
+596            return "BEGIN TRANSACTION"
+597
+598        def commit_sql(self, *_) -> str:
+599            return "COMMIT TRANSACTION"
+600
+601        def rollback_sql(self, *_) -> str:
+602            return "ROLLBACK TRANSACTION"
+603
+604        def in_unnest_op(self, expression: exp.Unnest) -> str:
+605            return self.sql(expression)
+606
+607        def except_op(self, expression: exp.Except) -> str:
+608            if not expression.args.get("distinct", False):
+609                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+610            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+611
+612        def intersect_op(self, expression: exp.Intersect) -> str:
+613            if not expression.args.get("distinct", False):
+614                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+615            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+616
+617        def with_properties(self, properties: exp.Properties) -> str:
+618            return self.properties(properties, prefix=self.seg("OPTIONS"))
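Editorial aside (not part of the patch): a small sketch of the generator overrides above — TryCast gains the SAFE_ prefix via trycast_sql, and set operations always carry an explicit DISTINCT/ALL per except_op/intersect_op. Output text is approximate.

import sqlglot

print(sqlglot.transpile("SELECT TRY_CAST(x AS BIGINT) FROM a EXCEPT SELECT y FROM b", write="bigquery")[0])
# roughly: SELECT SAFE_CAST(x AS INT64) FROM a EXCEPT DISTINCT SELECT y FROM b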
 
@@ -1498,25 +1507,25 @@
-
202    @classmethod
-203    def normalize_identifier(cls, expression: E) -> E:
-204        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
-205        # The following check is essentially a heuristic to detect tables based on whether or
-206        # not they're qualified.
-207        if isinstance(expression, exp.Identifier):
-208            parent = expression.parent
-209
-210            while isinstance(parent, exp.Dot):
-211                parent = parent.parent
-212
-213            if (
-214                not isinstance(parent, exp.UserDefinedFunction)
-215                and not (isinstance(parent, exp.Table) and parent.db)
-216                and not expression.meta.get("is_table")
-217            ):
-218                expression.set("this", expression.this.lower())
-219
-220        return expression
+            
207    @classmethod
+208    def normalize_identifier(cls, expression: E) -> E:
+209        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
+210        # The following check is essentially a heuristic to detect tables based on whether or
+211        # not they're qualified.
+212        if isinstance(expression, exp.Identifier):
+213            parent = expression.parent
+214
+215            while isinstance(parent, exp.Dot):
+216                parent = parent.parent
+217
+218            if (
+219                not isinstance(parent, exp.UserDefinedFunction)
+220                and not (isinstance(parent, exp.Table) and parent.db)
+221                and not expression.meta.get("is_table")
+222            ):
+223                expression.set("this", expression.this.lower())
+224
+225        return expression
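Editorial aside (not part of the patch): a minimal sketch of the normalization heuristic above, using helpers from sqlglot.expressions.

from sqlglot import exp
from sqlglot.dialects.bigquery import BigQuery

# An unqualified identifier (e.g. a CTE name) is lowercased...
print(BigQuery.normalize_identifier(exp.to_identifier("MyCte")).name)  # mycte

# ...while an identifier inside a qualified table reference keeps its case.
table = exp.to_table("MyProject.MyDataset.MyTable")
print(BigQuery.normalize_identifier(table.this).name)  # MyTable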
 
@@ -1773,38 +1782,38 @@ they will be normalized regardless of being quoted or not.

-
222    class Tokenizer(tokens.Tokenizer):
-223        QUOTES = ["'", '"', '"""', "'''"]
-224        COMMENTS = ["--", "#", ("/*", "*/")]
-225        IDENTIFIERS = ["`"]
-226        STRING_ESCAPES = ["\\"]
-227
-228        HEX_STRINGS = [("0x", ""), ("0X", "")]
-229
-230        BYTE_STRINGS = [
-231            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
-232        ]
-233
-234        RAW_STRINGS = [
-235            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
-236        ]
-237
-238        KEYWORDS = {
-239            **tokens.Tokenizer.KEYWORDS,
-240            "ANY TYPE": TokenType.VARIANT,
-241            "BEGIN": TokenType.COMMAND,
-242            "BEGIN TRANSACTION": TokenType.BEGIN,
-243            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
-244            "BYTES": TokenType.BINARY,
-245            "DECLARE": TokenType.COMMAND,
-246            "FLOAT64": TokenType.DOUBLE,
-247            "INT64": TokenType.BIGINT,
-248            "RECORD": TokenType.STRUCT,
-249            "TIMESTAMP": TokenType.TIMESTAMPTZ,
-250            "NOT DETERMINISTIC": TokenType.VOLATILE,
-251            "UNKNOWN": TokenType.NULL,
-252        }
-253        KEYWORDS.pop("DIV")
+            
227    class Tokenizer(tokens.Tokenizer):
+228        QUOTES = ["'", '"', '"""', "'''"]
+229        COMMENTS = ["--", "#", ("/*", "*/")]
+230        IDENTIFIERS = ["`"]
+231        STRING_ESCAPES = ["\\"]
+232
+233        HEX_STRINGS = [("0x", ""), ("0X", "")]
+234
+235        BYTE_STRINGS = [
+236            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
+237        ]
+238
+239        RAW_STRINGS = [
+240            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
+241        ]
+242
+243        KEYWORDS = {
+244            **tokens.Tokenizer.KEYWORDS,
+245            "ANY TYPE": TokenType.VARIANT,
+246            "BEGIN": TokenType.COMMAND,
+247            "BEGIN TRANSACTION": TokenType.BEGIN,
+248            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
+249            "BYTES": TokenType.BINARY,
+250            "DECLARE": TokenType.COMMAND,
+251            "FLOAT64": TokenType.DOUBLE,
+252            "INT64": TokenType.BIGINT,
+253            "RECORD": TokenType.STRUCT,
+254            "TIMESTAMP": TokenType.TIMESTAMPTZ,
+255            "NOT DETERMINISTIC": TokenType.VOLATILE,
+256            "UNKNOWN": TokenType.NULL,
+257        }
+258        KEYWORDS.pop("DIV")
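Editorial aside (not part of the patch): the keyword remappings above mean BigQuery type spellings tokenize to generic types, so they translate cleanly to other dialects; a hedged sketch with approximate output.

import sqlglot

print(sqlglot.transpile("SELECT CAST(x AS INT64), CAST(y AS FLOAT64)", read="bigquery", write="duckdb")[0])
# roughly: SELECT CAST(x AS BIGINT), CAST(y AS DOUBLE)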
 
@@ -1945,112 +1954,113 @@ they will be normalized regardless of being quoted or not.

-
255    class Parser(parser.Parser):
-256        PREFIXED_PIVOT_COLUMNS = True
-257
-258        LOG_BASE_FIRST = False
-259        LOG_DEFAULTS_TO_LN = True
-260
-261        FUNCTIONS = {
-262            **parser.Parser.FUNCTIONS,
-263            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
-264            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
-265            "DATE_TRUNC": lambda args: exp.DateTrunc(
-266                unit=exp.Literal.string(str(seq_get(args, 1))),
-267                this=seq_get(args, 0),
-268            ),
-269            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
-270            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
-271            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
-272            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
-273            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
-274                [seq_get(args, 1), seq_get(args, 0)]
-275            ),
-276            "PARSE_TIMESTAMP": _parse_timestamp,
-277            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
-278            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
-279                this=seq_get(args, 0),
-280                expression=seq_get(args, 1),
-281                position=seq_get(args, 2),
-282                occurrence=seq_get(args, 3),
-283                group=exp.Literal.number(1)
-284                if re.compile(str(seq_get(args, 1))).groups == 1
-285                else None,
-286            ),
-287            "SPLIT": lambda args: exp.Split(
-288                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
-289                this=seq_get(args, 0),
-290                expression=seq_get(args, 1) or exp.Literal.string(","),
-291            ),
-292            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
-293            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
-294            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
-295            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
-296            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
-297        }
-298
-299        FUNCTION_PARSERS = {
-300            **parser.Parser.FUNCTION_PARSERS,
-301            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
-302        }
-303        FUNCTION_PARSERS.pop("TRIM")
+            
260    class Parser(parser.Parser):
+261        PREFIXED_PIVOT_COLUMNS = True
+262
+263        LOG_BASE_FIRST = False
+264        LOG_DEFAULTS_TO_LN = True
+265
+266        FUNCTIONS = {
+267            **parser.Parser.FUNCTIONS,
+268            "DATE": _parse_date,
+269            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
+270            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
+271            "DATE_TRUNC": lambda args: exp.DateTrunc(
+272                unit=exp.Literal.string(str(seq_get(args, 1))),
+273                this=seq_get(args, 0),
+274            ),
+275            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
+276            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
+277            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
+278            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
+279            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
+280                [seq_get(args, 1), seq_get(args, 0)]
+281            ),
+282            "PARSE_TIMESTAMP": _parse_timestamp,
+283            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
+284            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
+285                this=seq_get(args, 0),
+286                expression=seq_get(args, 1),
+287                position=seq_get(args, 2),
+288                occurrence=seq_get(args, 3),
+289                group=exp.Literal.number(1)
+290                if re.compile(str(seq_get(args, 1))).groups == 1
+291                else None,
+292            ),
+293            "SPLIT": lambda args: exp.Split(
+294                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
+295                this=seq_get(args, 0),
+296                expression=seq_get(args, 1) or exp.Literal.string(","),
+297            ),
+298            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
+299            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
+300            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
+301            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
+302            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
+303        }
 304
-305        NO_PAREN_FUNCTIONS = {
-306            **parser.Parser.NO_PAREN_FUNCTIONS,
-307            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+305        FUNCTION_PARSERS = {
+306            **parser.Parser.FUNCTION_PARSERS,
+307            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
 308        }
-309
-310        NESTED_TYPE_TOKENS = {
-311            *parser.Parser.NESTED_TYPE_TOKENS,
-312            TokenType.TABLE,
-313        }
-314
-315        ID_VAR_TOKENS = {
-316            *parser.Parser.ID_VAR_TOKENS,
-317            TokenType.VALUES,
-318        }
-319
-320        PROPERTY_PARSERS = {
-321            **parser.Parser.PROPERTY_PARSERS,
-322            "NOT DETERMINISTIC": lambda self: self.expression(
-323                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
-324            ),
-325            "OPTIONS": lambda self: self._parse_with_property(),
-326        }
-327
-328        CONSTRAINT_PARSERS = {
-329            **parser.Parser.CONSTRAINT_PARSERS,
-330            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
-331        }
-332
-333        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
-334            this = super()._parse_table_part(schema=schema)
-335
-336            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
-337            if isinstance(this, exp.Identifier):
-338                table_name = this.name
-339                while self._match(TokenType.DASH, advance=False) and self._next:
-340                    self._advance(2)
-341                    table_name += f"-{self._prev.text}"
-342
-343                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
-344
-345            return this
-346
-347        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
-348            table = super()._parse_table_parts(schema=schema)
-349            if isinstance(table.this, exp.Identifier) and "." in table.name:
-350                catalog, db, this, *rest = (
-351                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
-352                    for x in split_num_words(table.name, ".", 3)
-353                )
-354
-355                if rest and this:
-356                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
-357
-358                table = exp.Table(this=this, db=db, catalog=catalog)
-359
-360            return table
+309        FUNCTION_PARSERS.pop("TRIM")
+310
+311        NO_PAREN_FUNCTIONS = {
+312            **parser.Parser.NO_PAREN_FUNCTIONS,
+313            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
+314        }
+315
+316        NESTED_TYPE_TOKENS = {
+317            *parser.Parser.NESTED_TYPE_TOKENS,
+318            TokenType.TABLE,
+319        }
+320
+321        ID_VAR_TOKENS = {
+322            *parser.Parser.ID_VAR_TOKENS,
+323            TokenType.VALUES,
+324        }
+325
+326        PROPERTY_PARSERS = {
+327            **parser.Parser.PROPERTY_PARSERS,
+328            "NOT DETERMINISTIC": lambda self: self.expression(
+329                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
+330            ),
+331            "OPTIONS": lambda self: self._parse_with_property(),
+332        }
+333
+334        CONSTRAINT_PARSERS = {
+335            **parser.Parser.CONSTRAINT_PARSERS,
+336            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
+337        }
+338
+339        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
+340            this = super()._parse_table_part(schema=schema)
+341
+342            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
+343            if isinstance(this, exp.Identifier):
+344                table_name = this.name
+345                while self._match(TokenType.DASH, advance=False) and self._next:
+346                    self._advance(2)
+347                    table_name += f"-{self._prev.text}"
+348
+349                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
+350
+351            return this
+352
+353        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
+354            table = super()._parse_table_parts(schema=schema)
+355            if isinstance(table.this, exp.Identifier) and "." in table.name:
+356                catalog, db, this, *rest = (
+357                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
+358                    for x in split_num_words(table.name, ".", 3)
+359                )
+360
+361                if rest and this:
+362                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
+363
+364                table = exp.Table(this=this, db=db, catalog=catalog)
+365
+366            return table
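Editorial aside (not part of the patch): a minimal sketch of the parser behaviour above — DIV(a, b) becomes an integer-division expression, and unquoted dashes are accepted in table names, which BigQuery project ids such as my-project rely on.

import sqlglot
from sqlglot import exp

tree = sqlglot.parse_one("SELECT DIV(a, b) FROM my-project.dataset.tbl", read="bigquery")
print(tree.find(exp.IntDiv) is not None)      # True
print(tree.find(exp.Table).text("catalog"))   # my-project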
 
@@ -2111,7 +2121,7 @@ Default: 3
FUNCTIONS = - {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 
'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': 
<function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function BigQuery.Parser.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>} + {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function 
parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 
'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function BigQuery.Parser.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
@@ -2150,7 +2160,7 @@ Default: 3
NESTED_TYPE_TOKENS =
- {<TokenType.TABLE: 'TABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ARRAY: 'ARRAY'>}
+ {<TokenType.ARRAY: 'ARRAY'>, <TokenType.MAP: 'MAP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TABLE: 'TABLE'>}
@@ -2163,7 +2173,7 @@ Default: 3
ID_VAR_TOKENS = - {<TokenType.VARBINARY: 'VARBINARY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DELETE: 'DELETE'>, <TokenType.JSON: 'JSON'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ASC: 'ASC'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.VAR: 'VAR'>, <TokenType.SEMI: 'SEMI'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.DIV: 'DIV'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.ROW: 'ROW'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.IF: 'IF'>, <TokenType.BIT: 'BIT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.SOME: 'SOME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ALL: 'ALL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.JSONB: 'JSONB'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.XML: 'XML'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TOP: 'TOP'>, <TokenType.DESC: 'DESC'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.END: 'END'>, <TokenType.ENUM: 'ENUM'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.INET: 'INET'>, <TokenType.FULL: 'FULL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SET: 'SET'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.CACHE: 'CACHE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.MONEY: 'MONEY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DATE: 'DATE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.RANGE: 'RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UUID: 'UUID'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.LEFT: 'LEFT'>, <TokenType.FALSE: 'FALSE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.VALUES: 'VALUES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, 
<TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.INT256: 'INT256'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.IS: 'IS'>, <TokenType.CHAR: 'CHAR'>, <TokenType.MAP: 'MAP'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT: 'INT'>, <TokenType.ANY: 'ANY'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.KEEP: 'KEEP'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CASE: 'CASE'>, <TokenType.INT128: 'INT128'>} + {<TokenType.CACHE: 'CACHE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.JSON: 'JSON'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.DESC: 'DESC'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UUID: 'UUID'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FULL: 'FULL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.MAP: 'MAP'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.SOME: 'SOME'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INET: 'INET'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SHOW: 'SHOW'>, <TokenType.XML: 'XML'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.LEFT: 'LEFT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.IS: 'IS'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.IF: 'IF'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.INT128: 'INT128'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.VAR: 'VAR'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT: 'INT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, 
<TokenType.NEXT: 'NEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CASE: 'CASE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.ASC: 'ASC'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.ALL: 'ALL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.BIT: 'BIT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ANY: 'ANY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.END: 'END'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SET: 'SET'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DATE: 'DATE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TIME: 'TIME'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.INT256: 'INT256'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.VALUES: 'VALUES'>, <TokenType.TINYINT: 'TINYINT'>}
@@ -2376,256 +2386,257 @@ Default: 3
-
362    class Generator(generator.Generator):
-363        EXPLICIT_UNION = True
-364        INTERVAL_ALLOWS_PLURAL_FORM = False
-365        JOIN_HINTS = False
-366        QUERY_HINTS = False
-367        TABLE_HINTS = False
-368        LIMIT_FETCH = "LIMIT"
-369        RENAME_TABLE_WITH_DB = False
-370        ESCAPE_LINE_BREAK = True
-371
-372        TRANSFORMS = {
-373            **generator.Generator.TRANSFORMS,
-374            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
-375            exp.ArraySize: rename_func("ARRAY_LENGTH"),
-376            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
-377            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
-378            exp.DateAdd: _date_add_sql("DATE", "ADD"),
-379            exp.DateSub: _date_add_sql("DATE", "SUB"),
-380            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
-381            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
-382            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
-383            exp.DateStrToDate: datestrtodate_sql,
-384            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
-385            exp.JSONFormat: rename_func("TO_JSON_STRING"),
-386            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
-387            exp.GroupConcat: rename_func("STRING_AGG"),
-388            exp.ILike: no_ilike_sql,
-389            exp.IntDiv: rename_func("DIV"),
-390            exp.Max: max_or_greatest,
-391            exp.Min: min_or_least,
-392            exp.RegexpExtract: lambda self, e: self.func(
-393                "REGEXP_EXTRACT",
-394                e.this,
-395                e.expression,
-396                e.args.get("position"),
-397                e.args.get("occurrence"),
-398            ),
-399            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
-400            exp.Select: transforms.preprocess(
-401                [
-402                    transforms.explode_to_unnest,
-403                    _unqualify_unnest,
-404                    transforms.eliminate_distinct_on,
-405                    _alias_ordered_group,
-406                ]
-407            ),
-408            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
-409            exp.StrToTime: lambda self, e: self.func(
-410                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
-411            ),
-412            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
-413            exp.TimeSub: _date_add_sql("TIME", "SUB"),
-414            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
-415            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
-416            exp.TimeStrToTime: timestrtotime_sql,
-417            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
-418            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
-419            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
-420            exp.VariancePop: rename_func("VAR_POP"),
-421            exp.Values: _derived_table_values_to_unnest,
-422            exp.ReturnsProperty: _returnsproperty_sql,
-423            exp.Create: _create_sql,
-424            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
-425            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
-426            if e.name == "IMMUTABLE"
-427            else "NOT DETERMINISTIC",
-428        }
-429
-430        TYPE_MAPPING = {
-431            **generator.Generator.TYPE_MAPPING,
-432            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
-433            exp.DataType.Type.BIGINT: "INT64",
-434            exp.DataType.Type.BINARY: "BYTES",
-435            exp.DataType.Type.BOOLEAN: "BOOL",
-436            exp.DataType.Type.CHAR: "STRING",
-437            exp.DataType.Type.DECIMAL: "NUMERIC",
-438            exp.DataType.Type.DOUBLE: "FLOAT64",
-439            exp.DataType.Type.FLOAT: "FLOAT64",
-440            exp.DataType.Type.INT: "INT64",
-441            exp.DataType.Type.NCHAR: "STRING",
-442            exp.DataType.Type.NVARCHAR: "STRING",
-443            exp.DataType.Type.SMALLINT: "INT64",
-444            exp.DataType.Type.TEXT: "STRING",
-445            exp.DataType.Type.TIMESTAMP: "DATETIME",
-446            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
-447            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
-448            exp.DataType.Type.TINYINT: "INT64",
-449            exp.DataType.Type.VARBINARY: "BYTES",
-450            exp.DataType.Type.VARCHAR: "STRING",
-451            exp.DataType.Type.VARIANT: "ANY TYPE",
-452        }
-453
-454        PROPERTIES_LOCATION = {
-455            **generator.Generator.PROPERTIES_LOCATION,
-456            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
-457            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-458        }
-459
-460        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
-461        RESERVED_KEYWORDS = {
-462            *generator.Generator.RESERVED_KEYWORDS,
-463            "all",
-464            "and",
-465            "any",
-466            "array",
-467            "as",
-468            "asc",
-469            "assert_rows_modified",
-470            "at",
-471            "between",
-472            "by",
-473            "case",
-474            "cast",
-475            "collate",
-476            "contains",
-477            "create",
-478            "cross",
-479            "cube",
-480            "current",
-481            "default",
-482            "define",
-483            "desc",
-484            "distinct",
-485            "else",
-486            "end",
-487            "enum",
-488            "escape",
-489            "except",
-490            "exclude",
-491            "exists",
-492            "extract",
-493            "false",
-494            "fetch",
-495            "following",
-496            "for",
-497            "from",
-498            "full",
-499            "group",
-500            "grouping",
-501            "groups",
-502            "hash",
-503            "having",
-504            "if",
-505            "ignore",
-506            "in",
-507            "inner",
-508            "intersect",
-509            "interval",
-510            "into",
-511            "is",
-512            "join",
-513            "lateral",
-514            "left",
-515            "like",
-516            "limit",
-517            "lookup",
-518            "merge",
-519            "natural",
-520            "new",
-521            "no",
-522            "not",
-523            "null",
-524            "nulls",
-525            "of",
-526            "on",
-527            "or",
-528            "order",
-529            "outer",
-530            "over",
-531            "partition",
-532            "preceding",
-533            "proto",
-534            "qualify",
-535            "range",
-536            "recursive",
-537            "respect",
-538            "right",
-539            "rollup",
-540            "rows",
-541            "select",
-542            "set",
-543            "some",
-544            "struct",
-545            "tablesample",
-546            "then",
-547            "to",
-548            "treat",
-549            "true",
-550            "unbounded",
-551            "union",
-552            "unnest",
-553            "using",
-554            "when",
-555            "where",
-556            "window",
-557            "with",
-558            "within",
-559        }
-560
-561        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
-562            parent = expression.parent
-563
-564            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
-565            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
-566            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
-567                return self.func(
-568                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
-569                )
+            
368    class Generator(generator.Generator):
+369        EXPLICIT_UNION = True
+370        INTERVAL_ALLOWS_PLURAL_FORM = False
+371        JOIN_HINTS = False
+372        QUERY_HINTS = False
+373        TABLE_HINTS = False
+374        LIMIT_FETCH = "LIMIT"
+375        RENAME_TABLE_WITH_DB = False
+376        ESCAPE_LINE_BREAK = True
+377
+378        TRANSFORMS = {
+379            **generator.Generator.TRANSFORMS,
+380            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
+381            exp.ArraySize: rename_func("ARRAY_LENGTH"),
+382            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
+383            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
+384            exp.DateAdd: _date_add_sql("DATE", "ADD"),
+385            exp.DateFromParts: rename_func("DATE"),
+386            exp.DateSub: _date_add_sql("DATE", "SUB"),
+387            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
+388            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
+389            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
+390            exp.DateStrToDate: datestrtodate_sql,
+391            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
+392            exp.JSONFormat: rename_func("TO_JSON_STRING"),
+393            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
+394            exp.GroupConcat: rename_func("STRING_AGG"),
+395            exp.ILike: no_ilike_sql,
+396            exp.IntDiv: rename_func("DIV"),
+397            exp.Max: max_or_greatest,
+398            exp.Min: min_or_least,
+399            exp.RegexpExtract: lambda self, e: self.func(
+400                "REGEXP_EXTRACT",
+401                e.this,
+402                e.expression,
+403                e.args.get("position"),
+404                e.args.get("occurrence"),
+405            ),
+406            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
+407            exp.Select: transforms.preprocess(
+408                [
+409                    transforms.explode_to_unnest,
+410                    _unqualify_unnest,
+411                    transforms.eliminate_distinct_on,
+412                    _alias_ordered_group,
+413                ]
+414            ),
+415            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
+416            exp.StrToTime: lambda self, e: self.func(
+417                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
+418            ),
+419            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
+420            exp.TimeSub: _date_add_sql("TIME", "SUB"),
+421            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
+422            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
+423            exp.TimeStrToTime: timestrtotime_sql,
+424            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
+425            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
+426            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
+427            exp.VariancePop: rename_func("VAR_POP"),
+428            exp.Values: _derived_table_values_to_unnest,
+429            exp.ReturnsProperty: _returnsproperty_sql,
+430            exp.Create: _create_sql,
+431            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
+432            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
+433            if e.name == "IMMUTABLE"
+434            else "NOT DETERMINISTIC",
+435        }
+436
+437        TYPE_MAPPING = {
+438            **generator.Generator.TYPE_MAPPING,
+439            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
+440            exp.DataType.Type.BIGINT: "INT64",
+441            exp.DataType.Type.BINARY: "BYTES",
+442            exp.DataType.Type.BOOLEAN: "BOOL",
+443            exp.DataType.Type.CHAR: "STRING",
+444            exp.DataType.Type.DECIMAL: "NUMERIC",
+445            exp.DataType.Type.DOUBLE: "FLOAT64",
+446            exp.DataType.Type.FLOAT: "FLOAT64",
+447            exp.DataType.Type.INT: "INT64",
+448            exp.DataType.Type.NCHAR: "STRING",
+449            exp.DataType.Type.NVARCHAR: "STRING",
+450            exp.DataType.Type.SMALLINT: "INT64",
+451            exp.DataType.Type.TEXT: "STRING",
+452            exp.DataType.Type.TIMESTAMP: "DATETIME",
+453            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+454            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
+455            exp.DataType.Type.TINYINT: "INT64",
+456            exp.DataType.Type.VARBINARY: "BYTES",
+457            exp.DataType.Type.VARCHAR: "STRING",
+458            exp.DataType.Type.VARIANT: "ANY TYPE",
+459        }
+460
+461        PROPERTIES_LOCATION = {
+462            **generator.Generator.PROPERTIES_LOCATION,
+463            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
+464            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+465        }
+466
+467        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
+468        RESERVED_KEYWORDS = {
+469            *generator.Generator.RESERVED_KEYWORDS,
+470            "all",
+471            "and",
+472            "any",
+473            "array",
+474            "as",
+475            "asc",
+476            "assert_rows_modified",
+477            "at",
+478            "between",
+479            "by",
+480            "case",
+481            "cast",
+482            "collate",
+483            "contains",
+484            "create",
+485            "cross",
+486            "cube",
+487            "current",
+488            "default",
+489            "define",
+490            "desc",
+491            "distinct",
+492            "else",
+493            "end",
+494            "enum",
+495            "escape",
+496            "except",
+497            "exclude",
+498            "exists",
+499            "extract",
+500            "false",
+501            "fetch",
+502            "following",
+503            "for",
+504            "from",
+505            "full",
+506            "group",
+507            "grouping",
+508            "groups",
+509            "hash",
+510            "having",
+511            "if",
+512            "ignore",
+513            "in",
+514            "inner",
+515            "intersect",
+516            "interval",
+517            "into",
+518            "is",
+519            "join",
+520            "lateral",
+521            "left",
+522            "like",
+523            "limit",
+524            "lookup",
+525            "merge",
+526            "natural",
+527            "new",
+528            "no",
+529            "not",
+530            "null",
+531            "nulls",
+532            "of",
+533            "on",
+534            "or",
+535            "order",
+536            "outer",
+537            "over",
+538            "partition",
+539            "preceding",
+540            "proto",
+541            "qualify",
+542            "range",
+543            "recursive",
+544            "respect",
+545            "right",
+546            "rollup",
+547            "rows",
+548            "select",
+549            "set",
+550            "some",
+551            "struct",
+552            "tablesample",
+553            "then",
+554            "to",
+555            "treat",
+556            "true",
+557            "unbounded",
+558            "union",
+559            "unnest",
+560            "using",
+561            "when",
+562            "where",
+563            "window",
+564            "with",
+565            "within",
+566        }
+567
+568        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
+569            parent = expression.parent
 570
-571            return super().attimezone_sql(expression)
-572
-573        def trycast_sql(self, expression: exp.TryCast) -> str:
-574            return self.cast_sql(expression, safe_prefix="SAFE_")
-575
-576        def cte_sql(self, expression: exp.CTE) -> str:
-577            if expression.alias_column_names:
-578                self.unsupported("Column names in CTE definition are not supported.")
-579            return super().cte_sql(expression)
-580
-581        def array_sql(self, expression: exp.Array) -> str:
-582            first_arg = seq_get(expression.expressions, 0)
-583            if isinstance(first_arg, exp.Subqueryable):
-584                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-585
-586            return inline_array_sql(self, expression)
+571            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
+572            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
+573            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
+574                return self.func(
+575                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
+576                )
+577
+578            return super().attimezone_sql(expression)
+579
+580        def trycast_sql(self, expression: exp.TryCast) -> str:
+581            return self.cast_sql(expression, safe_prefix="SAFE_")
+582
+583        def cte_sql(self, expression: exp.CTE) -> str:
+584            if expression.alias_column_names:
+585                self.unsupported("Column names in CTE definition are not supported.")
+586            return super().cte_sql(expression)
 587
-588        def transaction_sql(self, *_) -> str:
-589            return "BEGIN TRANSACTION"
-590
-591        def commit_sql(self, *_) -> str:
-592            return "COMMIT TRANSACTION"
-593
-594        def rollback_sql(self, *_) -> str:
-595            return "ROLLBACK TRANSACTION"
-596
-597        def in_unnest_op(self, expression: exp.Unnest) -> str:
-598            return self.sql(expression)
-599
-600        def except_op(self, expression: exp.Except) -> str:
-601            if not expression.args.get("distinct", False):
-602                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-603            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-604
-605        def intersect_op(self, expression: exp.Intersect) -> str:
-606            if not expression.args.get("distinct", False):
-607                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-608            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
-609
-610        def with_properties(self, properties: exp.Properties) -> str:
-611            return self.properties(properties, prefix=self.seg("OPTIONS"))
+588        def array_sql(self, expression: exp.Array) -> str:
+589            first_arg = seq_get(expression.expressions, 0)
+590            if isinstance(first_arg, exp.Subqueryable):
+591                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+592
+593            return inline_array_sql(self, expression)
+594
+595        def transaction_sql(self, *_) -> str:
+596            return "BEGIN TRANSACTION"
+597
+598        def commit_sql(self, *_) -> str:
+599            return "COMMIT TRANSACTION"
+600
+601        def rollback_sql(self, *_) -> str:
+602            return "ROLLBACK TRANSACTION"
+603
+604        def in_unnest_op(self, expression: exp.Unnest) -> str:
+605            return self.sql(expression)
+606
+607        def except_op(self, expression: exp.Except) -> str:
+608            if not expression.args.get("distinct", False):
+609                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+610            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+611
+612        def intersect_op(self, expression: exp.Intersect) -> str:
+613            if not expression.args.get("distinct", False):
+614                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+615            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+616
+617        def with_properties(self, properties: exp.Properties) -> str:
+618            return self.properties(properties, prefix=self.seg("OPTIONS"))
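A minimal, hedged usage sketch of the Generator settings in the hunk above (illustrative, not part of the upstream patch; assumes sqlglot is installed and exact output wording may vary slightly between versions):

import sqlglot

# TYPE_MAPPING renders VARCHAR as STRING; TRANSFORMS renames APPROX_DISTINCT.
sql = "SELECT APPROX_DISTINCT(a), CAST(b AS VARCHAR) FROM t"
print(sqlglot.transpile(sql, write="bigquery")[0])
# Expected (roughly): SELECT APPROX_COUNT_DISTINCT(a), CAST(b AS STRING) FROM t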
 
@@ -2768,7 +2779,7 @@ Default: True
TRANSFORMS = - {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, 
<class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>} + {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function 
_date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>}
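A hedged sketch of the new exp.DateFromParts -> DATE(...) rendering that this version adds to TRANSFORMS (illustrative only; the read-side DATE_FROM_PARTS spelling is an assumption about the generic parser, not part of the patch):

import sqlglot

print(sqlglot.transpile("SELECT DATE_FROM_PARTS(2023, 1, 1)", write="bigquery")[0])
# Expected (roughly): SELECT DATE(2023, 1, 1)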
@@ -2807,7 +2818,7 @@ Default: True
RESERVED_KEYWORDS = - {'define', 'within', 'set', 'rows', 'qualify', 'inner', 'using', 'any', 'grouping', 'desc', 'treat', 'to', 'unbounded', 'else', 'except', 'new', 'intersect', 'unnest', 'struct', 'range', 'escape', 'exclude', 'groups', 'extract', 'lookup', 'nulls', 'fetch', 'in', 'with', 'collate', 'end', 'limit', 'null', 'if', 'proto', 'cast', 'on', 'contains', 'into', 'order', 'union', 'for', 'hash', 'no', 'enum', 'left', 'asc', 'cube', 'group', 'where', 'case', 'interval', 'rollup', 'some', 'all', 'full', 'create', 'of', 'merge', 'default', 'partition', 'by', 'right', 'respect', 'outer', 'between', 'when', 'false', 'cross', 'preceding', 'ignore', 'join', 'as', 'recursive', 'tablesample', 'not', 'select', 'from', 'exists', 'then', 'array', 'at', 'or', 'window', 'assert_rows_modified', 'over', 'lateral', 'and', 'current', 'like', 'having', 'distinct', 'following', 'is', 'natural', 'true'} + {'when', 'group', 'null', 'treat', 'then', 'order', 'rollup', 'as', 'some', 'and', 'fetch', 'create', 'end', 'array', 'or', 'on', 'intersect', 'grouping', 'partition', 'from', 'recursive', 'preceding', 'merge', 'having', 'exclude', 'escape', 'set', 'into', 'groups', 'like', 'by', 'lateral', 'over', 'join', 'no', 'else', 'where', 'in', 'between', 'union', 'all', 'cube', 'outer', 'contains', 'distinct', 'unbounded', 'case', 'tablesample', 'window', 'with', 'of', 'if', 'left', 'asc', 'natural', 'ignore', 'desc', 'nulls', 'define', 'new', 'select', 'cross', 'at', 'not', 'any', 'lookup', 'inner', 'following', 'full', 'range', 'is', 'within', 'extract', 'cast', 'exists', 'for', 'qualify', 'false', 'assert_rows_modified', 'to', 'current', 'except', 'proto', 'interval', 'true', 'struct', 'enum', 'unnest', 'hash', 'limit', 'rows', 'using', 'right', 'respect', 'collate', 'default'}
@@ -2827,17 +2838,17 @@ Default: True
-
561        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
-562            parent = expression.parent
-563
-564            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
-565            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
-566            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
-567                return self.func(
-568                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
-569                )
+            
568        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
+569            parent = expression.parent
 570
-571            return super().attimezone_sql(expression)
+571            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
+572            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
+573            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
+574                return self.func(
+575                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
+576                )
+577
+578            return super().attimezone_sql(expression)
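A small sketch of attimezone_sql outside the CAST(... AS STRING FORMAT ...) case (illustrative, assuming sqlglot is installed; the Postgres read dialect is only used here to parse AT TIME ZONE):

import sqlglot

sql = "SELECT x AT TIME ZONE 'America/Los_Angeles'"
print(sqlglot.transpile(sql, read="postgres", write="bigquery")[0])
# Expected (roughly): SELECT TIMESTAMP(DATETIME(x, 'America/Los_Angeles'))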
 
@@ -2855,8 +2866,8 @@ Default: True
-
573        def trycast_sql(self, expression: exp.TryCast) -> str:
-574            return self.cast_sql(expression, safe_prefix="SAFE_")
+            
580        def trycast_sql(self, expression: exp.TryCast) -> str:
+581            return self.cast_sql(expression, safe_prefix="SAFE_")
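A hedged sketch of trycast_sql, which prefixes the cast with SAFE_ (illustrative; assumes the generic parser accepts TRY_CAST, which is not shown in this patch):

import sqlglot

print(sqlglot.transpile("SELECT TRY_CAST(x AS BIGINT)", write="bigquery")[0])
# Expected (roughly): SELECT SAFE_CAST(x AS INT64)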
 
@@ -2874,10 +2885,10 @@ Default: True
-
576        def cte_sql(self, expression: exp.CTE) -> str:
-577            if expression.alias_column_names:
-578                self.unsupported("Column names in CTE definition are not supported.")
-579            return super().cte_sql(expression)
+            
583        def cte_sql(self, expression: exp.CTE) -> str:
+584            if expression.alias_column_names:
+585                self.unsupported("Column names in CTE definition are not supported.")
+586            return super().cte_sql(expression)
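A sketch of the cte_sql guard above: column aliases in a CTE definition trigger self.unsupported, which by default only logs a warning while generation continues (illustrative, assuming the default unsupported_level):

import sqlglot

# Logs "Column names in CTE definition are not supported." and still emits SQL.
print(sqlglot.transpile("WITH t(a) AS (SELECT 1) SELECT a FROM t", write="bigquery")[0])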
 
@@ -2895,12 +2906,12 @@ Default: True
-
581        def array_sql(self, expression: exp.Array) -> str:
-582            first_arg = seq_get(expression.expressions, 0)
-583            if isinstance(first_arg, exp.Subqueryable):
-584                return f"ARRAY{self.wrap(self.sql(first_arg))}"
-585
-586            return inline_array_sql(self, expression)
+            
588        def array_sql(self, expression: exp.Array) -> str:
+589            first_arg = seq_get(expression.expressions, 0)
+590            if isinstance(first_arg, exp.Subqueryable):
+591                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+592
+593            return inline_array_sql(self, expression)
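A sketch of array_sql's two paths: a subquery argument keeps the ARRAY(...) call form, while plain elements fall through to inline_array_sql (illustrative round trips, assuming sqlglot is installed):

import sqlglot

print(sqlglot.transpile("SELECT ARRAY(SELECT 1)", read="bigquery", write="bigquery")[0])
# Expected (roughly): SELECT ARRAY(SELECT 1)
print(sqlglot.transpile("SELECT [1, 2, 3]", read="bigquery", write="bigquery")[0])
# Expected (roughly): SELECT [1, 2, 3]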
 
@@ -2918,8 +2929,8 @@ Default: True
-
588        def transaction_sql(self, *_) -> str:
-589            return "BEGIN TRANSACTION"
+            
595        def transaction_sql(self, *_) -> str:
+596            return "BEGIN TRANSACTION"
 
@@ -2937,8 +2948,8 @@ Default: True
-
591        def commit_sql(self, *_) -> str:
-592            return "COMMIT TRANSACTION"
+            
598        def commit_sql(self, *_) -> str:
+599            return "COMMIT TRANSACTION"
 
@@ -2956,8 +2967,8 @@ Default: True
-
594        def rollback_sql(self, *_) -> str:
-595            return "ROLLBACK TRANSACTION"
+            
601        def rollback_sql(self, *_) -> str:
+602            return "ROLLBACK TRANSACTION"
 
@@ -2975,8 +2986,8 @@ Default: True
-
597        def in_unnest_op(self, expression: exp.Unnest) -> str:
-598            return self.sql(expression)
+            
604        def in_unnest_op(self, expression: exp.Unnest) -> str:
+605            return self.sql(expression)
 
@@ -2994,10 +3005,10 @@ Default: True
-
600        def except_op(self, expression: exp.Except) -> str:
-601            if not expression.args.get("distinct", False):
-602                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
-603            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+            
607        def except_op(self, expression: exp.Except) -> str:
+608            if not expression.args.get("distinct", False):
+609                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
+610            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
 
@@ -3015,10 +3026,10 @@ Default: True
-
605        def intersect_op(self, expression: exp.Intersect) -> str:
-606            if not expression.args.get("distinct", False):
-607                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
-608            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
+            
612        def intersect_op(self, expression: exp.Intersect) -> str:
+613            if not expression.args.get("distinct", False):
+614                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
+615            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
 
@@ -3036,8 +3047,8 @@ Default: True
-
610        def with_properties(self, properties: exp.Properties) -> str:
-611            return self.properties(properties, prefix=self.seg("OPTIONS"))
+            
617        def with_properties(self, properties: exp.Properties) -> str:
+618            return self.properties(properties, prefix=self.seg("OPTIONS"))
 
-- cgit v1.2.3