Edit on GitHub

sqlglot.dialects.bigquery

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    datestrtodate_sql,
 13    format_time_lambda,
 14    inline_array_sql,
 15    json_keyvalue_comma_sql,
 16    max_or_greatest,
 17    min_or_least,
 18    no_ilike_sql,
 19    parse_date_delta_with_interval,
 20    regexp_replace_sql,
 21    rename_func,
 22    timestrtotime_sql,
 23    ts_or_ds_to_date_sql,
 24)
 25from sqlglot.helper import seq_get, split_num_words
 26from sqlglot.tokens import TokenType
 27
 28logger = logging.getLogger("sqlglot")
 29
 30
def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    """Build a generator for BigQuery's ``<TYPE>_<ADD|SUB>(this, INTERVAL n UNIT)`` calls.

    Args:
        data_type: the BigQuery type prefix, e.g. "DATE", "DATETIME", "TIME", "TIMESTAMP".
        kind: either "ADD" or "SUB".

    Returns:
        A transform suitable for use in ``Generator.TRANSFORMS``.
    """

    def func(self: generator.Generator, expression: exp.Expression) -> str:
        this = self.sql(expression, "this")
        # Fall back to DAY granularity when the expression carries no unit
        unit = expression.args.get("unit")
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression.copy(), unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func
 42
 43
 44def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
 45    if not expression.find_ancestor(exp.From, exp.Join):
 46        return self.values_sql(expression)
 47
 48    alias = expression.args.get("alias")
 49
 50    structs = [
 51        exp.Struct(
 52            expressions=[
 53                exp.alias_(value, column_name)
 54                for value, column_name in zip(
 55                    t.expressions,
 56                    alias.columns
 57                    if alias and alias.columns
 58                    else (f"_c{i}" for i in range(len(t.expressions))),
 59                )
 60            ]
 61        )
 62        for t in expression.find_all(exp.Tuple)
 63    ]
 64
 65    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 66
 67
 68def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
 69    this = expression.this
 70    if isinstance(this, exp.Schema):
 71        this = f"{this.this} <{self.expressions(this)}>"
 72    else:
 73        this = self.sql(this)
 74    return f"RETURNS {this}"
 75
 76
 77def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
 78    kind = expression.args["kind"]
 79    returns = expression.find(exp.ReturnsProperty)
 80
 81    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 82        expression = expression.copy()
 83        expression.set("kind", "TABLE FUNCTION")
 84
 85        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 86            expression.set("expression", expression.expression.this)
 87
 88        return self.create_sql(expression)
 89
 90    return self.create_sql(expression)
 91
 92
 93def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 94    """Remove references to unnest table aliases since bigquery doesn't allow them.
 95
 96    These are added by the optimizer's qualify_column step.
 97    """
 98    from sqlglot.optimizer.scope import find_all_in_scope
 99
100    if isinstance(expression, exp.Select):
101        unnest_aliases = {
102            unnest.alias
103            for unnest in find_all_in_scope(expression, exp.Unnest)
104            if isinstance(unnest.parent, (exp.From, exp.Join))
105        }
106        if unnest_aliases:
107            for column in expression.find_all(exp.Column):
108                if column.table in unnest_aliases:
109                    column.set("table", None)
110                elif column.db in unnest_aliases:
111                    column.set("db", None)
112
113    return expression
114
115
116# https://issuetracker.google.com/issues/162294746
117# workaround for bigquery bug when grouping by an expression and then ordering
118# WITH x AS (SELECT 1 y)
119# SELECT y + 1 z
120# FROM x
121# GROUP BY x + 1
122# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their select-list aliases when ORDER BY is present.

    Works around https://issuetracker.google.com/issues/162294746, where BigQuery
    fails when grouping by an expression and then ordering by its alias.
    """
    if not isinstance(expression, exp.Select):
        return expression

    group = expression.args.get("group")
    order = expression.args.get("order")

    if group and order:
        # Map each aliased projection's underlying expression to its alias
        aliases = {
            select.this: select.args["alias"]
            for select in expression.selects
            if isinstance(select, exp.Alias)
        }

        for grouped in group.expressions:
            alias = aliases.get(grouped)
            if alias:
                grouped.replace(exp.column(alias))

    return expression
142
143
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down.

    E.g. ``WITH cte (a, b) AS (SELECT x, y ...)`` becomes
    ``WITH cte AS (SELECT x AS a, y AS b ...)``.
    """
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        # Star projections can't be matched one-to-one with the CTE column names
        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        # Drop the column list from the CTE alias; the names move into the projections below
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            # Keep a handle on the original node: it is the one replaced in the tree
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
169
170
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Parse ``PARSE_TIMESTAMP(fmt, value[, zone])`` into a StrToTime expression."""
    # BigQuery puts the format first; StrToTime expects (value, format)
    build = format_time_lambda(exp.StrToTime, "bigquery")
    expr = build([seq_get(args, 1), seq_get(args, 0)])
    expr.set("zone", seq_get(args, 2))
    return expr
175
176
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """``DATE(y, m, d)`` parses to DateFromParts; any other arity to Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
180
181
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
186
187
class BigQuery(Dialect):
    """BigQuery (Google Standard SQL) dialect: tokenizer, parser and generator settings."""

    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"...": byte literals over every supported quote style
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"...": raw strings over every supported quote style
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        # DIV is a function in BigQuery, not an operator keyword
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        SUPPORTS_USER_DEFINED_TYPES = False

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # Single-group patterns implicitly extract group 1 in BigQuery
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM takes plain arguments in BigQuery, not the ANSI TRIM(... FROM ...) syntax
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                # Re-join dash-separated table names (e.g. my-project) into one identifier
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                # A single quoted identifier like `project.dataset.table` is split
                # into catalog / db / table parts
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            # NOTE: plain string literals here — the original used pointless
            # f-string prefixes (ruff F541) on the three constants below
            exp.SHA2: lambda self, e: self.func(
                "SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            # TRY_CAST is spelled SAFE_CAST in BigQuery
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            # Column lists should have been pushed down by _pushdown_cte_column_names
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            # ARRAY(SELECT ...) keeps its subquery form; other arrays are rendered inline
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            # FOR SYSTEM_TIME AS OF is BigQuery's spelling of FOR TIMESTAMP AS OF
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
189class BigQuery(Dialect):
190    UNNEST_COLUMN_ONLY = True
191
192    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
193    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
194
195    # bigquery udfs are case sensitive
196    NORMALIZE_FUNCTIONS = False
197
198    TIME_MAPPING = {
199        "%D": "%m/%d/%y",
200    }
201
202    FORMAT_MAPPING = {
203        "DD": "%d",
204        "MM": "%m",
205        "MON": "%b",
206        "MONTH": "%B",
207        "YYYY": "%Y",
208        "YY": "%y",
209        "HH": "%I",
210        "HH12": "%I",
211        "HH24": "%H",
212        "MI": "%M",
213        "SS": "%S",
214        "SSSSS": "%f",
215        "TZH": "%z",
216    }
217
218    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
219    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
220    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
221
222    @classmethod
223    def normalize_identifier(cls, expression: E) -> E:
224        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
225        # The following check is essentially a heuristic to detect tables based on whether or
226        # not they're qualified.
227        if isinstance(expression, exp.Identifier):
228            parent = expression.parent
229
230            while isinstance(parent, exp.Dot):
231                parent = parent.parent
232
233            if (
234                not isinstance(parent, exp.UserDefinedFunction)
235                and not (isinstance(parent, exp.Table) and parent.db)
236                and not expression.meta.get("is_table")
237            ):
238                expression.set("this", expression.this.lower())
239
240        return expression
241
242    class Tokenizer(tokens.Tokenizer):
243        QUOTES = ["'", '"', '"""', "'''"]
244        COMMENTS = ["--", "#", ("/*", "*/")]
245        IDENTIFIERS = ["`"]
246        STRING_ESCAPES = ["\\"]
247
248        HEX_STRINGS = [("0x", ""), ("0X", "")]
249
250        BYTE_STRINGS = [
251            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
252        ]
253
254        RAW_STRINGS = [
255            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
256        ]
257
258        KEYWORDS = {
259            **tokens.Tokenizer.KEYWORDS,
260            "ANY TYPE": TokenType.VARIANT,
261            "BEGIN": TokenType.COMMAND,
262            "BEGIN TRANSACTION": TokenType.BEGIN,
263            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
264            "BYTES": TokenType.BINARY,
265            "DECLARE": TokenType.COMMAND,
266            "FLOAT64": TokenType.DOUBLE,
267            "INT64": TokenType.BIGINT,
268            "RECORD": TokenType.STRUCT,
269            "TIMESTAMP": TokenType.TIMESTAMPTZ,
270            "NOT DETERMINISTIC": TokenType.VOLATILE,
271            "UNKNOWN": TokenType.NULL,
272            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
273        }
274        KEYWORDS.pop("DIV")
275
276    class Parser(parser.Parser):
277        PREFIXED_PIVOT_COLUMNS = True
278
279        LOG_BASE_FIRST = False
280        LOG_DEFAULTS_TO_LN = True
281
282        SUPPORTS_USER_DEFINED_TYPES = False
283
284        FUNCTIONS = {
285            **parser.Parser.FUNCTIONS,
286            "DATE": _parse_date,
287            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
288            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
289            "DATE_TRUNC": lambda args: exp.DateTrunc(
290                unit=exp.Literal.string(str(seq_get(args, 1))),
291                this=seq_get(args, 0),
292            ),
293            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
294            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
295            "DIV": binary_from_function(exp.IntDiv),
296            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
297            "MD5": exp.MD5Digest.from_arg_list,
298            "TO_HEX": _parse_to_hex,
299            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
300                [seq_get(args, 1), seq_get(args, 0)]
301            ),
302            "PARSE_TIMESTAMP": _parse_timestamp,
303            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
304            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
305                this=seq_get(args, 0),
306                expression=seq_get(args, 1),
307                position=seq_get(args, 2),
308                occurrence=seq_get(args, 3),
309                group=exp.Literal.number(1)
310                if re.compile(str(seq_get(args, 1))).groups == 1
311                else None,
312            ),
313            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
314            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
315            "SPLIT": lambda args: exp.Split(
316                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
317                this=seq_get(args, 0),
318                expression=seq_get(args, 1) or exp.Literal.string(","),
319            ),
320            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
321            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
322            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
323            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
324            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
325        }
326
327        FUNCTION_PARSERS = {
328            **parser.Parser.FUNCTION_PARSERS,
329            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
330        }
331        FUNCTION_PARSERS.pop("TRIM")
332
333        NO_PAREN_FUNCTIONS = {
334            **parser.Parser.NO_PAREN_FUNCTIONS,
335            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
336        }
337
338        NESTED_TYPE_TOKENS = {
339            *parser.Parser.NESTED_TYPE_TOKENS,
340            TokenType.TABLE,
341        }
342
343        ID_VAR_TOKENS = {
344            *parser.Parser.ID_VAR_TOKENS,
345            TokenType.VALUES,
346        }
347
348        PROPERTY_PARSERS = {
349            **parser.Parser.PROPERTY_PARSERS,
350            "NOT DETERMINISTIC": lambda self: self.expression(
351                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
352            ),
353            "OPTIONS": lambda self: self._parse_with_property(),
354        }
355
356        CONSTRAINT_PARSERS = {
357            **parser.Parser.CONSTRAINT_PARSERS,
358            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
359        }
360
361        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
362            this = super()._parse_table_part(schema=schema)
363
364            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
365            if isinstance(this, exp.Identifier):
366                table_name = this.name
367                while self._match(TokenType.DASH, advance=False) and self._next:
368                    self._advance(2)
369                    table_name += f"-{self._prev.text}"
370
371                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
372
373            return this
374
375        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
376            table = super()._parse_table_parts(schema=schema)
377            if isinstance(table.this, exp.Identifier) and "." in table.name:
378                catalog, db, this, *rest = (
379                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
380                    for x in split_num_words(table.name, ".", 3)
381                )
382
383                if rest and this:
384                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
385
386                table = exp.Table(this=this, db=db, catalog=catalog)
387
388            return table
389
390    class Generator(generator.Generator):
391        EXPLICIT_UNION = True
392        INTERVAL_ALLOWS_PLURAL_FORM = False
393        JOIN_HINTS = False
394        QUERY_HINTS = False
395        TABLE_HINTS = False
396        LIMIT_FETCH = "LIMIT"
397        RENAME_TABLE_WITH_DB = False
398        ESCAPE_LINE_BREAK = True
399        NVL2_SUPPORTED = False
400
401        TRANSFORMS = {
402            **generator.Generator.TRANSFORMS,
403            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
404            exp.ArraySize: rename_func("ARRAY_LENGTH"),
405            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
406            exp.Create: _create_sql,
407            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
408            exp.DateAdd: _date_add_sql("DATE", "ADD"),
409            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
410            exp.DateFromParts: rename_func("DATE"),
411            exp.DateStrToDate: datestrtodate_sql,
412            exp.DateSub: _date_add_sql("DATE", "SUB"),
413            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
414            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
415            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
416            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
417            exp.GroupConcat: rename_func("STRING_AGG"),
418            exp.Hex: rename_func("TO_HEX"),
419            exp.ILike: no_ilike_sql,
420            exp.IntDiv: rename_func("DIV"),
421            exp.JSONFormat: rename_func("TO_JSON_STRING"),
422            exp.JSONKeyValue: json_keyvalue_comma_sql,
423            exp.Max: max_or_greatest,
424            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
425            exp.MD5Digest: rename_func("MD5"),
426            exp.Min: min_or_least,
427            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
428            exp.RegexpExtract: lambda self, e: self.func(
429                "REGEXP_EXTRACT",
430                e.this,
431                e.expression,
432                e.args.get("position"),
433                e.args.get("occurrence"),
434            ),
435            exp.RegexpReplace: regexp_replace_sql,
436            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
437            exp.ReturnsProperty: _returnsproperty_sql,
438            exp.Select: transforms.preprocess(
439                [
440                    transforms.explode_to_unnest,
441                    _unqualify_unnest,
442                    transforms.eliminate_distinct_on,
443                    _alias_ordered_group,
444                ]
445            ),
446            exp.SHA2: lambda self, e: self.func(
447                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
448            ),
449            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
450            if e.name == "IMMUTABLE"
451            else "NOT DETERMINISTIC",
452            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
453            exp.StrToTime: lambda self, e: self.func(
454                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
455            ),
456            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
457            exp.TimeSub: _date_add_sql("TIME", "SUB"),
458            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
459            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
460            exp.TimeStrToTime: timestrtotime_sql,
461            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
462            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
463            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
464            exp.Unhex: rename_func("FROM_HEX"),
465            exp.Values: _derived_table_values_to_unnest,
466            exp.VariancePop: rename_func("VAR_POP"),
467        }
468
469        TYPE_MAPPING = {
470            **generator.Generator.TYPE_MAPPING,
471            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
472            exp.DataType.Type.BIGINT: "INT64",
473            exp.DataType.Type.BINARY: "BYTES",
474            exp.DataType.Type.BOOLEAN: "BOOL",
475            exp.DataType.Type.CHAR: "STRING",
476            exp.DataType.Type.DECIMAL: "NUMERIC",
477            exp.DataType.Type.DOUBLE: "FLOAT64",
478            exp.DataType.Type.FLOAT: "FLOAT64",
479            exp.DataType.Type.INT: "INT64",
480            exp.DataType.Type.NCHAR: "STRING",
481            exp.DataType.Type.NVARCHAR: "STRING",
482            exp.DataType.Type.SMALLINT: "INT64",
483            exp.DataType.Type.TEXT: "STRING",
484            exp.DataType.Type.TIMESTAMP: "DATETIME",
485            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
486            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
487            exp.DataType.Type.TINYINT: "INT64",
488            exp.DataType.Type.VARBINARY: "BYTES",
489            exp.DataType.Type.VARCHAR: "STRING",
490            exp.DataType.Type.VARIANT: "ANY TYPE",
491        }
492
493        PROPERTIES_LOCATION = {
494            **generator.Generator.PROPERTIES_LOCATION,
495            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
496            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
497        }
498
499        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
500        RESERVED_KEYWORDS = {
501            *generator.Generator.RESERVED_KEYWORDS,
502            "all",
503            "and",
504            "any",
505            "array",
506            "as",
507            "asc",
508            "assert_rows_modified",
509            "at",
510            "between",
511            "by",
512            "case",
513            "cast",
514            "collate",
515            "contains",
516            "create",
517            "cross",
518            "cube",
519            "current",
520            "default",
521            "define",
522            "desc",
523            "distinct",
524            "else",
525            "end",
526            "enum",
527            "escape",
528            "except",
529            "exclude",
530            "exists",
531            "extract",
532            "false",
533            "fetch",
534            "following",
535            "for",
536            "from",
537            "full",
538            "group",
539            "grouping",
540            "groups",
541            "hash",
542            "having",
543            "if",
544            "ignore",
545            "in",
546            "inner",
547            "intersect",
548            "interval",
549            "into",
550            "is",
551            "join",
552            "lateral",
553            "left",
554            "like",
555            "limit",
556            "lookup",
557            "merge",
558            "natural",
559            "new",
560            "no",
561            "not",
562            "null",
563            "nulls",
564            "of",
565            "on",
566            "or",
567            "order",
568            "outer",
569            "over",
570            "partition",
571            "preceding",
572            "proto",
573            "qualify",
574            "range",
575            "recursive",
576            "respect",
577            "right",
578            "rollup",
579            "rows",
580            "select",
581            "set",
582            "some",
583            "struct",
584            "tablesample",
585            "then",
586            "to",
587            "treat",
588            "true",
589            "unbounded",
590            "union",
591            "unnest",
592            "using",
593            "when",
594            "where",
595            "window",
596            "with",
597            "within",
598        }
599
600        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
601            parent = expression.parent
602
603            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
604            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
605            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
606                return self.func(
607                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
608                )
609
610            return super().attimezone_sql(expression)
611
612        def trycast_sql(self, expression: exp.TryCast) -> str:
613            return self.cast_sql(expression, safe_prefix="SAFE_")
614
615        def cte_sql(self, expression: exp.CTE) -> str:
616            if expression.alias_column_names:
617                self.unsupported("Column names in CTE definition are not supported.")
618            return super().cte_sql(expression)
619
620        def array_sql(self, expression: exp.Array) -> str:
621            first_arg = seq_get(expression.expressions, 0)
622            if isinstance(first_arg, exp.Subqueryable):
623                return f"ARRAY{self.wrap(self.sql(first_arg))}"
624
625            return inline_array_sql(self, expression)
626
627        def transaction_sql(self, *_) -> str:
628            return "BEGIN TRANSACTION"
629
630        def commit_sql(self, *_) -> str:
631            return "COMMIT TRANSACTION"
632
633        def rollback_sql(self, *_) -> str:
634            return "ROLLBACK TRANSACTION"
635
636        def in_unnest_op(self, expression: exp.Unnest) -> str:
637            return self.sql(expression)
638
639        def except_op(self, expression: exp.Except) -> str:
640            if not expression.args.get("distinct", False):
641                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
642            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
643
644        def intersect_op(self, expression: exp.Intersect) -> str:
645            if not expression.args.get("distinct", False):
646                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
647            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
648
649        def with_properties(self, properties: exp.Properties) -> str:
650            return self.properties(properties, prefix=self.seg("OPTIONS"))
651
652        def version_sql(self, expression: exp.Version) -> str:
653            if expression.name == "TIMESTAMP":
654                expression = expression.copy()
655                expression.set("this", "SYSTEM_TIME")
656            return super().version_sql(expression)
UNNEST_COLUMN_ONLY = True
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
222    @classmethod
223    def normalize_identifier(cls, expression: E) -> E:
224        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
225        # The following check is essentially a heuristic to detect tables based on whether or
226        # not they're qualified.
227        if isinstance(expression, exp.Identifier):
228            parent = expression.parent
229
230            while isinstance(parent, exp.Dot):
231                parent = parent.parent
232
233            if (
234                not isinstance(parent, exp.UserDefinedFunction)
235                and not (isinstance(parent, exp.Table) and parent.db)
236                and not expression.meta.get("is_table")
237            ):
238                expression.set("this", expression.this.lower())
239
240        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of whether they are quoted.

tokenizer_class = <class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
generator_class = <class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
242    class Tokenizer(tokens.Tokenizer):
243        QUOTES = ["'", '"', '"""', "'''"]
244        COMMENTS = ["--", "#", ("/*", "*/")]
245        IDENTIFIERS = ["`"]
246        STRING_ESCAPES = ["\\"]
247
248        HEX_STRINGS = [("0x", ""), ("0X", "")]
249
250        BYTE_STRINGS = [
251            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
252        ]
253
254        RAW_STRINGS = [
255            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
256        ]
257
258        KEYWORDS = {
259            **tokens.Tokenizer.KEYWORDS,
260            "ANY TYPE": TokenType.VARIANT,
261            "BEGIN": TokenType.COMMAND,
262            "BEGIN TRANSACTION": TokenType.BEGIN,
263            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
264            "BYTES": TokenType.BINARY,
265            "DECLARE": TokenType.COMMAND,
266            "FLOAT64": TokenType.DOUBLE,
267            "INT64": TokenType.BIGINT,
268            "RECORD": TokenType.STRUCT,
269            "TIMESTAMP": TokenType.TIMESTAMPTZ,
270            "NOT DETERMINISTIC": TokenType.VOLATILE,
271            "UNKNOWN": TokenType.NULL,
272            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
273        }
274        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 
'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': 
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': 
<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
276    class Parser(parser.Parser):
277        PREFIXED_PIVOT_COLUMNS = True
278
279        LOG_BASE_FIRST = False
280        LOG_DEFAULTS_TO_LN = True
281
282        SUPPORTS_USER_DEFINED_TYPES = False
283
284        FUNCTIONS = {
285            **parser.Parser.FUNCTIONS,
286            "DATE": _parse_date,
287            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
288            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
289            "DATE_TRUNC": lambda args: exp.DateTrunc(
290                unit=exp.Literal.string(str(seq_get(args, 1))),
291                this=seq_get(args, 0),
292            ),
293            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
294            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
295            "DIV": binary_from_function(exp.IntDiv),
296            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
297            "MD5": exp.MD5Digest.from_arg_list,
298            "TO_HEX": _parse_to_hex,
299            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
300                [seq_get(args, 1), seq_get(args, 0)]
301            ),
302            "PARSE_TIMESTAMP": _parse_timestamp,
303            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
304            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
305                this=seq_get(args, 0),
306                expression=seq_get(args, 1),
307                position=seq_get(args, 2),
308                occurrence=seq_get(args, 3),
309                group=exp.Literal.number(1)
310                if re.compile(str(seq_get(args, 1))).groups == 1
311                else None,
312            ),
313            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
314            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
315            "SPLIT": lambda args: exp.Split(
316                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
317                this=seq_get(args, 0),
318                expression=seq_get(args, 1) or exp.Literal.string(","),
319            ),
320            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
321            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
322            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
323            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
324            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
325        }
326
327        FUNCTION_PARSERS = {
328            **parser.Parser.FUNCTION_PARSERS,
329            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
330        }
331        FUNCTION_PARSERS.pop("TRIM")
332
333        NO_PAREN_FUNCTIONS = {
334            **parser.Parser.NO_PAREN_FUNCTIONS,
335            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
336        }
337
338        NESTED_TYPE_TOKENS = {
339            *parser.Parser.NESTED_TYPE_TOKENS,
340            TokenType.TABLE,
341        }
342
343        ID_VAR_TOKENS = {
344            *parser.Parser.ID_VAR_TOKENS,
345            TokenType.VALUES,
346        }
347
348        PROPERTY_PARSERS = {
349            **parser.Parser.PROPERTY_PARSERS,
350            "NOT DETERMINISTIC": lambda self: self.expression(
351                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
352            ),
353            "OPTIONS": lambda self: self._parse_with_property(),
354        }
355
356        CONSTRAINT_PARSERS = {
357            **parser.Parser.CONSTRAINT_PARSERS,
358            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
359        }
360
361        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
362            this = super()._parse_table_part(schema=schema)
363
364            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
365            if isinstance(this, exp.Identifier):
366                table_name = this.name
367                while self._match(TokenType.DASH, advance=False) and self._next:
368                    self._advance(2)
369                    table_name += f"-{self._prev.text}"
370
371                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
372
373            return this
374
375        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
376            table = super()._parse_table_parts(schema=schema)
377            if isinstance(table.this, exp.Identifier) and "." in table.name:
378                catalog, db, this, *rest = (
379                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
380                    for x in split_num_words(table.name, ".", 3)
381                )
382
383                if rest and this:
384                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
385
386                table = exp.Table(this=this, db=db, catalog=catalog)
387
388            return table

The Parser consumes the list of tokens produced by the Tokenizer and builds a parsed syntax tree (AST) from it.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
SUPPORTS_USER_DEFINED_TYPES = False
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.MAP: 'MAP'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NULLABLE: 'NULLABLE'>}
ID_VAR_TOKENS = {<TokenType.END: 'END'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UINT: 'UINT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.SET: 'SET'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FILTER: 'FILTER'>, <TokenType.UUID: 'UUID'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.JSON: 'JSON'>, <TokenType.UINT128: 'UINT128'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.MAP: 'MAP'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.ANY: 'ANY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.IS: 'IS'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DIV: 'DIV'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.VALUES: 'VALUES'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MERGE: 'MERGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INT: 'INT'>, <TokenType.INT256: 'INT256'>, <TokenType.ALL: 'ALL'>, <TokenType.NULL: 'NULL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TOP: 'TOP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BIT: 'BIT'>, <TokenType.SEMI: 
'SEMI'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.DESC: 'DESC'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.TIME: 'TIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.XML: 'XML'>, <TokenType.INT128: 'INT128'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.ASC: 'ASC'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.DATE: 'DATE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.CASE: 'CASE'>, <TokenType.VAR: 'VAR'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.APPLY: 'APPLY'>, <TokenType.INET: 'INET'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.ROW: 'ROW'>, <TokenType.PRAGMA: 
'PRAGMA'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.SOME: 'SOME'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NEXT: 'NEXT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FULL: 'FULL'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.LEFT: 'LEFT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TRUE: 'TRUE'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
UNNEST_COLUMN_ONLY: bool = True
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
390    class Generator(generator.Generator):
391        EXPLICIT_UNION = True
392        INTERVAL_ALLOWS_PLURAL_FORM = False
393        JOIN_HINTS = False
394        QUERY_HINTS = False
395        TABLE_HINTS = False
396        LIMIT_FETCH = "LIMIT"
397        RENAME_TABLE_WITH_DB = False
398        ESCAPE_LINE_BREAK = True
399        NVL2_SUPPORTED = False
400
401        TRANSFORMS = {
402            **generator.Generator.TRANSFORMS,
403            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
404            exp.ArraySize: rename_func("ARRAY_LENGTH"),
405            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
406            exp.Create: _create_sql,
407            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
408            exp.DateAdd: _date_add_sql("DATE", "ADD"),
409            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
410            exp.DateFromParts: rename_func("DATE"),
411            exp.DateStrToDate: datestrtodate_sql,
412            exp.DateSub: _date_add_sql("DATE", "SUB"),
413            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
414            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
415            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
416            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
417            exp.GroupConcat: rename_func("STRING_AGG"),
418            exp.Hex: rename_func("TO_HEX"),
419            exp.ILike: no_ilike_sql,
420            exp.IntDiv: rename_func("DIV"),
421            exp.JSONFormat: rename_func("TO_JSON_STRING"),
422            exp.JSONKeyValue: json_keyvalue_comma_sql,
423            exp.Max: max_or_greatest,
424            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
425            exp.MD5Digest: rename_func("MD5"),
426            exp.Min: min_or_least,
427            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
428            exp.RegexpExtract: lambda self, e: self.func(
429                "REGEXP_EXTRACT",
430                e.this,
431                e.expression,
432                e.args.get("position"),
433                e.args.get("occurrence"),
434            ),
435            exp.RegexpReplace: regexp_replace_sql,
436            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
437            exp.ReturnsProperty: _returnsproperty_sql,
438            exp.Select: transforms.preprocess(
439                [
440                    transforms.explode_to_unnest,
441                    _unqualify_unnest,
442                    transforms.eliminate_distinct_on,
443                    _alias_ordered_group,
444                ]
445            ),
446            exp.SHA2: lambda self, e: self.func(
447                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
448            ),
449            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
450            if e.name == "IMMUTABLE"
451            else "NOT DETERMINISTIC",
452            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
453            exp.StrToTime: lambda self, e: self.func(
454                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
455            ),
456            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
457            exp.TimeSub: _date_add_sql("TIME", "SUB"),
458            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
459            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
460            exp.TimeStrToTime: timestrtotime_sql,
461            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
462            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
463            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
464            exp.Unhex: rename_func("FROM_HEX"),
465            exp.Values: _derived_table_values_to_unnest,
466            exp.VariancePop: rename_func("VAR_POP"),
467        }
468
469        TYPE_MAPPING = {
470            **generator.Generator.TYPE_MAPPING,
471            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
472            exp.DataType.Type.BIGINT: "INT64",
473            exp.DataType.Type.BINARY: "BYTES",
474            exp.DataType.Type.BOOLEAN: "BOOL",
475            exp.DataType.Type.CHAR: "STRING",
476            exp.DataType.Type.DECIMAL: "NUMERIC",
477            exp.DataType.Type.DOUBLE: "FLOAT64",
478            exp.DataType.Type.FLOAT: "FLOAT64",
479            exp.DataType.Type.INT: "INT64",
480            exp.DataType.Type.NCHAR: "STRING",
481            exp.DataType.Type.NVARCHAR: "STRING",
482            exp.DataType.Type.SMALLINT: "INT64",
483            exp.DataType.Type.TEXT: "STRING",
484            exp.DataType.Type.TIMESTAMP: "DATETIME",
485            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
486            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
487            exp.DataType.Type.TINYINT: "INT64",
488            exp.DataType.Type.VARBINARY: "BYTES",
489            exp.DataType.Type.VARCHAR: "STRING",
490            exp.DataType.Type.VARIANT: "ANY TYPE",
491        }
492
493        PROPERTIES_LOCATION = {
494            **generator.Generator.PROPERTIES_LOCATION,
495            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
496            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
497        }
498
499        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
500        RESERVED_KEYWORDS = {
501            *generator.Generator.RESERVED_KEYWORDS,
502            "all",
503            "and",
504            "any",
505            "array",
506            "as",
507            "asc",
508            "assert_rows_modified",
509            "at",
510            "between",
511            "by",
512            "case",
513            "cast",
514            "collate",
515            "contains",
516            "create",
517            "cross",
518            "cube",
519            "current",
520            "default",
521            "define",
522            "desc",
523            "distinct",
524            "else",
525            "end",
526            "enum",
527            "escape",
528            "except",
529            "exclude",
530            "exists",
531            "extract",
532            "false",
533            "fetch",
534            "following",
535            "for",
536            "from",
537            "full",
538            "group",
539            "grouping",
540            "groups",
541            "hash",
542            "having",
543            "if",
544            "ignore",
545            "in",
546            "inner",
547            "intersect",
548            "interval",
549            "into",
550            "is",
551            "join",
552            "lateral",
553            "left",
554            "like",
555            "limit",
556            "lookup",
557            "merge",
558            "natural",
559            "new",
560            "no",
561            "not",
562            "null",
563            "nulls",
564            "of",
565            "on",
566            "or",
567            "order",
568            "outer",
569            "over",
570            "partition",
571            "preceding",
572            "proto",
573            "qualify",
574            "range",
575            "recursive",
576            "respect",
577            "right",
578            "rollup",
579            "rows",
580            "select",
581            "set",
582            "some",
583            "struct",
584            "tablesample",
585            "then",
586            "to",
587            "treat",
588            "true",
589            "unbounded",
590            "union",
591            "unnest",
592            "using",
593            "when",
594            "where",
595            "window",
596            "with",
597            "within",
598        }
599
600        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
601            parent = expression.parent
602
603            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
604            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
605            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
606                return self.func(
607                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
608                )
609
610            return super().attimezone_sql(expression)
611
612        def trycast_sql(self, expression: exp.TryCast) -> str:
613            return self.cast_sql(expression, safe_prefix="SAFE_")
614
615        def cte_sql(self, expression: exp.CTE) -> str:
616            if expression.alias_column_names:
617                self.unsupported("Column names in CTE definition are not supported.")
618            return super().cte_sql(expression)
619
620        def array_sql(self, expression: exp.Array) -> str:
621            first_arg = seq_get(expression.expressions, 0)
622            if isinstance(first_arg, exp.Subqueryable):
623                return f"ARRAY{self.wrap(self.sql(first_arg))}"
624
625            return inline_array_sql(self, expression)
626
627        def transaction_sql(self, *_) -> str:
628            return "BEGIN TRANSACTION"
629
630        def commit_sql(self, *_) -> str:
631            return "COMMIT TRANSACTION"
632
633        def rollback_sql(self, *_) -> str:
634            return "ROLLBACK TRANSACTION"
635
636        def in_unnest_op(self, expression: exp.Unnest) -> str:
637            return self.sql(expression)
638
639        def except_op(self, expression: exp.Except) -> str:
640            if not expression.args.get("distinct", False):
641                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
642            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
643
644        def intersect_op(self, expression: exp.Intersect) -> str:
645            if not expression.args.get("distinct", False):
646                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
647            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
648
649        def with_properties(self, properties: exp.Properties) -> str:
650            return self.properties(properties, prefix=self.seg("OPTIONS"))
651
652        def version_sql(self, expression: exp.Version) -> str:
653            if expression.name == "TIMESTAMP":
654                expression = expression.copy()
655                expression.set("this", "SYSTEM_TIME")
656            return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case-insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
NVL2_SUPPORTED = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalDayToSecondSpan'>: 'DAY TO SECOND', <class 'sqlglot.expressions.IntervalYearToMonthSpan'>: 'YEAR TO MONTH', <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function 
BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function 
BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'lateral', 'all', 'having', 'by', 'from', 'tablesample', 'true', 'within', 'collate', 'window', 'rows', 'unnest', 'preceding', 'between', 'then', 'at', 'lookup', 'left', 'cross', 'join', 'any', 'inner', 'when', 'array', 'natural', 'merge', 'treat', 'default', 'in', 'on', 'respect', 'where', 'range', 'fetch', 'enum', 'rollup', 'exists', 'limit', 'struct', 'over', 'nulls', 'select', 'for', 'qualify', 'groups', 'contains', 'intersect', 'right', 'using', 'cast', 'recursive', 'set', 'distinct', 'is', 'end', 'new', 'define', 'with', 'as', 'exclude', 'full', 'to', 'assert_rows_modified', 'order', 'no', 'false', 'group', 'into', 'null', 'escape', 'proto', 'create', 'outer', 'except', 'if', 'interval', 'grouping', 'or', 'following', 'like', 'current', 'desc', 'cube', 'partition', 'of', 'ignore', 'not', 'some', 'case', 'and', 'asc', 'hash', 'unbounded', 'union', 'extract', 'else'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
600        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
601            parent = expression.parent
602
603            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
604            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
605            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
606                return self.func(
607                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
608                )
609
610            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
612        def trycast_sql(self, expression: exp.TryCast) -> str:
613            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
615        def cte_sql(self, expression: exp.CTE) -> str:
616            if expression.alias_column_names:
617                self.unsupported("Column names in CTE definition are not supported.")
618            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
620        def array_sql(self, expression: exp.Array) -> str:
621            first_arg = seq_get(expression.expressions, 0)
622            if isinstance(first_arg, exp.Subqueryable):
623                return f"ARRAY{self.wrap(self.sql(first_arg))}"
624
625            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
627        def transaction_sql(self, *_) -> str:
628            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
630        def commit_sql(self, *_) -> str:
631            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
633        def rollback_sql(self, *_) -> str:
634            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
636        def in_unnest_op(self, expression: exp.Unnest) -> str:
637            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
639        def except_op(self, expression: exp.Except) -> str:
640            if not expression.args.get("distinct", False):
641                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
642            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
644        def intersect_op(self, expression: exp.Intersect) -> str:
645            if not expression.args.get("distinct", False):
646                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
647            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
649        def with_properties(self, properties: exp.Properties) -> str:
650            return self.properties(properties, prefix=self.seg("OPTIONS"))
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
652        def version_sql(self, expression: exp.Version) -> str:
653            if expression.name == "TIMESTAMP":
654                expression = expression.copy()
655                expression.set("this", "SYSTEM_TIME")
656            return super().version_sql(expression)
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
256    @classmethod
257    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
258        """Checks if text can be identified given an identify option.
259
260        Args:
261            text: The text to check.
262            identify:
263                "always" or `True`: Always returns true.
264                "safe": True if the identifier is case-insensitive.
265
266        Returns:
267            Whether or not the given text can be identified.
268        """
269        if identify is True or identify == "always":
270            return True
271
272        if identify == "safe":
273            return not cls.case_sensitive(text)
274
275        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.

Returns:
  Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_ADD_COLUMN_KEYWORD
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql