# sqlglot.dialects.bigquery

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    datestrtodate_sql,
 13    format_time_lambda,
 14    inline_array_sql,
 15    json_keyvalue_comma_sql,
 16    max_or_greatest,
 17    min_or_least,
 18    no_ilike_sql,
 19    parse_date_delta_with_interval,
 20    regexp_replace_sql,
 21    rename_func,
 22    timestrtotime_sql,
 23    ts_or_ds_to_date_sql,
 24)
 25from sqlglot.helper import seq_get, split_num_words
 26from sqlglot.tokens import TokenType
 27
 28logger = logging.getLogger("sqlglot")
 29
 30
 31def _date_add_sql(
 32    data_type: str, kind: str
 33) -> t.Callable[[BigQuery.Generator, exp.Expression], str]:
 34    def func(self: BigQuery.Generator, expression: exp.Expression) -> str:
 35        this = self.sql(expression, "this")
 36        unit = expression.args.get("unit")
 37        unit = exp.var(unit.name.upper() if unit else "DAY")
 38        interval = exp.Interval(this=expression.expression.copy(), unit=unit)
 39        return f"{data_type}_{kind}({this}, {self.sql(interval)})"
 40
 41    return func
 42
 43
 44def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
 45    if not expression.find_ancestor(exp.From, exp.Join):
 46        return self.values_sql(expression)
 47
 48    alias = expression.args.get("alias")
 49
 50    structs = [
 51        exp.Struct(
 52            expressions=[
 53                exp.alias_(value, column_name)
 54                for value, column_name in zip(
 55                    t.expressions,
 56                    alias.columns
 57                    if alias and alias.columns
 58                    else (f"_c{i}" for i in range(len(t.expressions))),
 59                )
 60            ]
 61        )
 62        for t in expression.find_all(exp.Tuple)
 63    ]
 64
 65    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 66
 67
 68def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
 69    this = expression.this
 70    if isinstance(this, exp.Schema):
 71        this = f"{this.this} <{self.expressions(this)}>"
 72    else:
 73        this = self.sql(this)
 74    return f"RETURNS {this}"
 75
 76
 77def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
 78    kind = expression.args["kind"]
 79    returns = expression.find(exp.ReturnsProperty)
 80
 81    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 82        expression = expression.copy()
 83        expression.set("kind", "TABLE FUNCTION")
 84
 85        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 86            expression.set("expression", expression.expression.this)
 87
 88        return self.create_sql(expression)
 89
 90    return self.create_sql(expression)
 91
 92
 93def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 94    """Remove references to unnest table aliases since bigquery doesn't allow them.
 95
 96    These are added by the optimizer's qualify_column step.
 97    """
 98    from sqlglot.optimizer.scope import find_all_in_scope
 99
100    if isinstance(expression, exp.Select):
101        unnest_aliases = {
102            unnest.alias
103            for unnest in find_all_in_scope(expression, exp.Unnest)
104            if isinstance(unnest.parent, (exp.From, exp.Join))
105        }
106        if unnest_aliases:
107            for column in expression.find_all(exp.Column):
108                if column.table in unnest_aliases:
109                    column.set("table", None)
110                elif column.db in unnest_aliases:
111                    column.set("db", None)
112
113    return expression
114
115
116# https://issuetracker.google.com/issues/162294746
117# workaround for bigquery bug when grouping by an expression and then ordering
118# WITH x AS (SELECT 1 y)
119# SELECT y + 1 z
120# FROM x
121# GROUP BY x + 1
122# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Swap GROUP BY expressions for their SELECT aliases when an ORDER BY is present."""
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            # Map each aliased projection's underlying expression to its alias.
            aliases = {}
            for select in expression.selects:
                if isinstance(select, exp.Alias):
                    aliases[select.this] = select.args["alias"]

            for grouped in group.expressions:
                replacement = aliases.get(grouped)

                if replacement:
                    grouped.replace(exp.column(replacement))

    return expression
142
143
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            # Inner aliases are shadowed by the CTE column names
            if isinstance(select, exp.Alias):
                select = select.this

            to_replace.replace(exp.alias_(select, name))

    return expression
169
170
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Map ``PARSE_TIMESTAMP(fmt, value[, zone])`` onto StrToTime with an optional zone."""
    build = format_time_lambda(exp.StrToTime, "bigquery")
    expression = build([seq_get(args, 1), seq_get(args, 0)])
    expression.set("zone", seq_get(args, 2))
    return expression
175
176
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """``DATE(y, m, d)`` builds DateFromParts; any other arity builds Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
180
181
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
186
187
class BigQuery(Dialect):
    """BigQuery (GoogleSQL) dialect: tokenizer, parser, and generator overrides."""

    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # BigQuery implicitly extracts the single capture group, if there is one.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, gluing dash-separated and numeric pieces together."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Split a dotted identifier like `project.dataset.table` into table parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.SHA2: lambda self, e: self.func(
                "SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#json_literals
            if expression.is_type("json"):
                return f"JSON {self.sql(expression, 'this')}"

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            # TRY_CAST is rendered as BigQuery's SAFE_CAST.
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                # ARRAY(SELECT ...) keeps the subquery form rather than an inline literal.
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
189class BigQuery(Dialect):
190    UNNEST_COLUMN_ONLY = True
191    SUPPORTS_USER_DEFINED_TYPES = False
192
193    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
194    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
195
196    # bigquery udfs are case sensitive
197    NORMALIZE_FUNCTIONS = False
198
199    TIME_MAPPING = {
200        "%D": "%m/%d/%y",
201    }
202
203    FORMAT_MAPPING = {
204        "DD": "%d",
205        "MM": "%m",
206        "MON": "%b",
207        "MONTH": "%B",
208        "YYYY": "%Y",
209        "YY": "%y",
210        "HH": "%I",
211        "HH12": "%I",
212        "HH24": "%H",
213        "MI": "%M",
214        "SS": "%S",
215        "SSSSS": "%f",
216        "TZH": "%z",
217    }
218
219    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
220    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
221    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
222
223    @classmethod
224    def normalize_identifier(cls, expression: E) -> E:
225        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
226        # The following check is essentially a heuristic to detect tables based on whether or
227        # not they're qualified.
228        if isinstance(expression, exp.Identifier):
229            parent = expression.parent
230
231            while isinstance(parent, exp.Dot):
232                parent = parent.parent
233
234            if (
235                not isinstance(parent, exp.UserDefinedFunction)
236                and not (isinstance(parent, exp.Table) and parent.db)
237                and not expression.meta.get("is_table")
238            ):
239                expression.set("this", expression.this.lower())
240
241        return expression
242
243    class Tokenizer(tokens.Tokenizer):
244        QUOTES = ["'", '"', '"""', "'''"]
245        COMMENTS = ["--", "#", ("/*", "*/")]
246        IDENTIFIERS = ["`"]
247        STRING_ESCAPES = ["\\"]
248
249        HEX_STRINGS = [("0x", ""), ("0X", "")]
250
251        BYTE_STRINGS = [
252            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
253        ]
254
255        RAW_STRINGS = [
256            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
257        ]
258
259        KEYWORDS = {
260            **tokens.Tokenizer.KEYWORDS,
261            "ANY TYPE": TokenType.VARIANT,
262            "BEGIN": TokenType.COMMAND,
263            "BEGIN TRANSACTION": TokenType.BEGIN,
264            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
265            "BYTES": TokenType.BINARY,
266            "DECLARE": TokenType.COMMAND,
267            "FLOAT64": TokenType.DOUBLE,
268            "INT64": TokenType.BIGINT,
269            "RECORD": TokenType.STRUCT,
270            "TIMESTAMP": TokenType.TIMESTAMPTZ,
271            "NOT DETERMINISTIC": TokenType.VOLATILE,
272            "UNKNOWN": TokenType.NULL,
273            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
274        }
275        KEYWORDS.pop("DIV")
276
    class Parser(parser.Parser):
        """Parser overrides mapping BigQuery-specific syntax and function
        signatures onto SQLGlot's canonical expression tree.
        """

        # PIVOT output columns are prefixed with the aggregation alias.
        PREFIXED_PIVOT_COLUMNS = True

        # LOG(x[, base]) takes the base as the *second* argument, and a
        # single-argument LOG is the natural logarithm.
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE(format, value): swap into StrToDate's (value, format) order.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # If the pattern has exactly one capture group, record group=1
                # so dialects with an explicit group argument can render it.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # BigQuery's TRIM takes plain comma-separated arguments rather than
        # the TRIM(x FROM y) form handled by the default parser.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one component of a table name, merging dash-separated
            pieces (and adjacent number/word pieces) into a single identifier.
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # Glue a trailing var onto a leading number only when the two
                # tokens are directly adjacent in the input (no whitespace).
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference; a single quoted `project.dataset.table`
            identifier is split into catalog/db/name components.
            """
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Parse JSON_OBJECT, canonicalizing the two-array signature into
            key/value pairs.
            """
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
420
421    class Generator(generator.Generator):
422        EXPLICIT_UNION = True
423        INTERVAL_ALLOWS_PLURAL_FORM = False
424        JOIN_HINTS = False
425        QUERY_HINTS = False
426        TABLE_HINTS = False
427        LIMIT_FETCH = "LIMIT"
428        RENAME_TABLE_WITH_DB = False
429        ESCAPE_LINE_BREAK = True
430        NVL2_SUPPORTED = False
431
432        TRANSFORMS = {
433            **generator.Generator.TRANSFORMS,
434            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
435            exp.ArraySize: rename_func("ARRAY_LENGTH"),
436            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
437            exp.Create: _create_sql,
438            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
439            exp.DateAdd: _date_add_sql("DATE", "ADD"),
440            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
441            exp.DateFromParts: rename_func("DATE"),
442            exp.DateStrToDate: datestrtodate_sql,
443            exp.DateSub: _date_add_sql("DATE", "SUB"),
444            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
445            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
446            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
447            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
448            exp.GroupConcat: rename_func("STRING_AGG"),
449            exp.Hex: rename_func("TO_HEX"),
450            exp.ILike: no_ilike_sql,
451            exp.IntDiv: rename_func("DIV"),
452            exp.JSONFormat: rename_func("TO_JSON_STRING"),
453            exp.JSONKeyValue: json_keyvalue_comma_sql,
454            exp.Max: max_or_greatest,
455            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
456            exp.MD5Digest: rename_func("MD5"),
457            exp.Min: min_or_least,
458            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
459            exp.RegexpExtract: lambda self, e: self.func(
460                "REGEXP_EXTRACT",
461                e.this,
462                e.expression,
463                e.args.get("position"),
464                e.args.get("occurrence"),
465            ),
466            exp.RegexpReplace: regexp_replace_sql,
467            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
468            exp.ReturnsProperty: _returnsproperty_sql,
469            exp.Select: transforms.preprocess(
470                [
471                    transforms.explode_to_unnest,
472                    _unqualify_unnest,
473                    transforms.eliminate_distinct_on,
474                    _alias_ordered_group,
475                ]
476            ),
477            exp.SHA2: lambda self, e: self.func(
478                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
479            ),
480            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
481            if e.name == "IMMUTABLE"
482            else "NOT DETERMINISTIC",
483            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
484            exp.StrToTime: lambda self, e: self.func(
485                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
486            ),
487            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
488            exp.TimeSub: _date_add_sql("TIME", "SUB"),
489            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
490            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
491            exp.TimeStrToTime: timestrtotime_sql,
492            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
493            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
494            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
495            exp.Unhex: rename_func("FROM_HEX"),
496            exp.Values: _derived_table_values_to_unnest,
497            exp.VariancePop: rename_func("VAR_POP"),
498        }
499
500        TYPE_MAPPING = {
501            **generator.Generator.TYPE_MAPPING,
502            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
503            exp.DataType.Type.BIGINT: "INT64",
504            exp.DataType.Type.BINARY: "BYTES",
505            exp.DataType.Type.BOOLEAN: "BOOL",
506            exp.DataType.Type.CHAR: "STRING",
507            exp.DataType.Type.DECIMAL: "NUMERIC",
508            exp.DataType.Type.DOUBLE: "FLOAT64",
509            exp.DataType.Type.FLOAT: "FLOAT64",
510            exp.DataType.Type.INT: "INT64",
511            exp.DataType.Type.NCHAR: "STRING",
512            exp.DataType.Type.NVARCHAR: "STRING",
513            exp.DataType.Type.SMALLINT: "INT64",
514            exp.DataType.Type.TEXT: "STRING",
515            exp.DataType.Type.TIMESTAMP: "DATETIME",
516            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
517            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
518            exp.DataType.Type.TINYINT: "INT64",
519            exp.DataType.Type.VARBINARY: "BYTES",
520            exp.DataType.Type.VARCHAR: "STRING",
521            exp.DataType.Type.VARIANT: "ANY TYPE",
522        }
523
524        PROPERTIES_LOCATION = {
525            **generator.Generator.PROPERTIES_LOCATION,
526            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
527            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
528        }
529
530        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
531        RESERVED_KEYWORDS = {
532            *generator.Generator.RESERVED_KEYWORDS,
533            "all",
534            "and",
535            "any",
536            "array",
537            "as",
538            "asc",
539            "assert_rows_modified",
540            "at",
541            "between",
542            "by",
543            "case",
544            "cast",
545            "collate",
546            "contains",
547            "create",
548            "cross",
549            "cube",
550            "current",
551            "default",
552            "define",
553            "desc",
554            "distinct",
555            "else",
556            "end",
557            "enum",
558            "escape",
559            "except",
560            "exclude",
561            "exists",
562            "extract",
563            "false",
564            "fetch",
565            "following",
566            "for",
567            "from",
568            "full",
569            "group",
570            "grouping",
571            "groups",
572            "hash",
573            "having",
574            "if",
575            "ignore",
576            "in",
577            "inner",
578            "intersect",
579            "interval",
580            "into",
581            "is",
582            "join",
583            "lateral",
584            "left",
585            "like",
586            "limit",
587            "lookup",
588            "merge",
589            "natural",
590            "new",
591            "no",
592            "not",
593            "null",
594            "nulls",
595            "of",
596            "on",
597            "or",
598            "order",
599            "outer",
600            "over",
601            "partition",
602            "preceding",
603            "proto",
604            "qualify",
605            "range",
606            "recursive",
607            "respect",
608            "right",
609            "rollup",
610            "rows",
611            "select",
612            "set",
613            "some",
614            "struct",
615            "tablesample",
616            "then",
617            "to",
618            "treat",
619            "true",
620            "unbounded",
621            "union",
622            "unnest",
623            "using",
624            "when",
625            "where",
626            "window",
627            "with",
628            "within",
629        }
630
631        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
632            parent = expression.parent
633
634            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
635            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
636            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
637                return self.func(
638                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
639                )
640
641            return super().attimezone_sql(expression)
642
643        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
644            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#json_literals
645            if expression.is_type("json"):
646                return f"JSON {self.sql(expression, 'this')}"
647
648            return super().cast_sql(expression, safe_prefix=safe_prefix)
649
650        def trycast_sql(self, expression: exp.TryCast) -> str:
651            return self.cast_sql(expression, safe_prefix="SAFE_")
652
653        def cte_sql(self, expression: exp.CTE) -> str:
654            if expression.alias_column_names:
655                self.unsupported("Column names in CTE definition are not supported.")
656            return super().cte_sql(expression)
657
658        def array_sql(self, expression: exp.Array) -> str:
659            first_arg = seq_get(expression.expressions, 0)
660            if isinstance(first_arg, exp.Subqueryable):
661                return f"ARRAY{self.wrap(self.sql(first_arg))}"
662
663            return inline_array_sql(self, expression)
664
665        def transaction_sql(self, *_) -> str:
666            return "BEGIN TRANSACTION"
667
668        def commit_sql(self, *_) -> str:
669            return "COMMIT TRANSACTION"
670
671        def rollback_sql(self, *_) -> str:
672            return "ROLLBACK TRANSACTION"
673
674        def in_unnest_op(self, expression: exp.Unnest) -> str:
675            return self.sql(expression)
676
677        def except_op(self, expression: exp.Except) -> str:
678            if not expression.args.get("distinct", False):
679                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
680            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
681
682        def intersect_op(self, expression: exp.Intersect) -> str:
683            if not expression.args.get("distinct", False):
684                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
685            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
686
687        def with_properties(self, properties: exp.Properties) -> str:
688            return self.properties(properties, prefix=self.seg("OPTIONS"))
689
690        def version_sql(self, expression: exp.Version) -> str:
691            if expression.name == "TIMESTAMP":
692                expression = expression.copy()
693                expression.set("this", "SYSTEM_TIME")
694            return super().version_sql(expression)
UNNEST_COLUMN_ONLY = True
SUPPORTS_USER_DEFINED_TYPES = False
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
223    @classmethod
224    def normalize_identifier(cls, expression: E) -> E:
225        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
226        # The following check is essentially a heuristic to detect tables based on whether or
227        # not they're qualified.
228        if isinstance(expression, exp.Identifier):
229            parent = expression.parent
230
231            while isinstance(parent, exp.Dot):
232                parent = parent.parent
233
234            if (
235                not isinstance(parent, exp.UserDefinedFunction)
236                and not (isinstance(parent, exp.Table) and parent.db)
237                and not expression.meta.get("is_table")
238            ):
239                expression.set("this", expression.this.lower())
240
241        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of being quoted or not.

tokenizer_class = <class 'BigQuery.Tokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer overrides for BigQuery's lexical grammar."""

        # Both quote characters, including their triple-quoted forms.
        QUOTES = ["'", '"', '"""', "'''"]
        # BigQuery also supports `#` line comments.
        COMMENTS = ["--", "#", ("/*", "*/")]
        # Identifiers are quoted with backticks.
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        # Hex literals use a 0x/0X prefix with no terminator.
        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # Byte literals: every quote style prefixed with b/B, e.g. b'...', B"""...""".
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # Raw (no escape processing) strings: every quote style prefixed with r/R.
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            # A bare TIMESTAMP in BigQuery is the timezone-aware type.
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        # DIV is a function in BigQuery, not an operator keyword.
        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 
'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': 
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 
'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """BigQuery-specific parser overrides.

        Customizes function-name resolution, identifier rules, property and
        constraint parsing so that BigQuery (GoogleSQL) source text is mapped
        onto sqlglot's canonical expression tree.
        """

        # Pivot output columns are prefixed (alias first) in BigQuery.
        PREFIXED_PIVOT_COLUMNS = True

        # LOG(value, base): the value comes first; a one-argument LOG defaults
        # to the natural logarithm (LN).
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        # BigQuery function names -> AST builders, layered on top of the base
        # parser's registry. `_parse_date`, `_parse_to_hex` and
        # `_parse_timestamp` are module-level helpers defined earlier in this
        # file (outside this excerpt).
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            # DATE_TRUNC(date, part): the part (2nd arg) becomes the canonical
            # `unit`, stringified so downstream generators see a literal.
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE(format, value): argument order is swapped into
            # sqlglot's StrToDate(value, format) convention.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # BigQuery returns the match of the single capturing group when
                # exactly one exists; record that as an explicit group=1.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            # SHA256/SHA512 are canonicalized to SHA2 with an explicit length.
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                # BigQuery's SPLIT defaults the delimiter to "," when omitted.
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(<subquery>) takes a full statement as its single element.
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # Drop the special TRIM(... FROM ...) parser so TRIM falls through to
        # the generic function-call path.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            # CURRENT_DATETIME may appear without parentheses.
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            # e.g. TABLE<...> in function return types.
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            # VALUES is a valid identifier in BigQuery.
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            # "NOT DETERMINISTIC" on a UDF maps to the VOLATILE stability class.
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            # OPTIONS(...) is parsed like a WITH-properties list.
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one component of a (possibly dotted) table name.

            Extends the base implementation to accept purely numeric parts and
            BigQuery's un-quoted dash-separated project names
            (e.g. ``my-project.dataset.table``).
            """
            # A table part may be a bare number (e.g. dataset `1234`).
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Greedily fold `ident - ident - ...` back into one identifier:
                # peek at DASH without consuming, then step over it and the
                # following token, appending "-<token>" each time.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A literal immediately followed (no whitespace gap) by a
                # var-like token is one identifier, e.g. `123abc`.
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a full table reference, re-splitting quoted dotted names.

            BigQuery allows the whole path inside one pair of backticks
            (`` `project.dataset.table` ``); such an identifier is split back
            into catalog / db / table parts here.
            """
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # More than three words: fold the remainder into a Dot chain.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Parse JSON_OBJECT, normalizing BigQuery's array-pair signature."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                # Zip the two arrays into individual key/value pairs.
                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.MAP: 'MAP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TABLE: 'TABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.STRUCT: 'STRUCT'>}
ID_VAR_TOKENS = {<TokenType.TEXT: 'TEXT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UUID: 'UUID'>, <TokenType.ALL: 'ALL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ANY: 'ANY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.DESC: 'DESC'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.VAR: 'VAR'>, <TokenType.VALUES: 'VALUES'>, <TokenType.VIEW: 'VIEW'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IS: 'IS'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ASC: 'ASC'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.JSON: 'JSON'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.INT: 'INT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.NULL: 'NULL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SET: 'SET'>, 
<TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FULL: 'FULL'>, <TokenType.MAP: 'MAP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.RANGE: 'RANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT256: 'INT256'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.XML: 'XML'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.MERGE: 'MERGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.TOP: 'TOP'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.ROW: 'ROW'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INET: 'INET'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TRUE: 'TRUE'>, 
<TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.INT128: 'INT128'>, <TokenType.BIT: 'BIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIME: 'TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.SHOW: 'SHOW'>, <TokenType.END: 'END'>, <TokenType.SOME: 'SOME'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.BINARY: 'BINARY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UINT: 'UINT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.DATE: 'DATE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CASE: 'CASE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.EXECUTE: 'EXECUTE'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
TOKENIZER_CLASS: Type[sqlglot.tokens.Tokenizer] = <class 'BigQuery.Tokenizer'>
UNNEST_COLUMN_ONLY: bool = True
SUPPORTS_USER_DEFINED_TYPES = False
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
421    class Generator(generator.Generator):
422        EXPLICIT_UNION = True
423        INTERVAL_ALLOWS_PLURAL_FORM = False
424        JOIN_HINTS = False
425        QUERY_HINTS = False
426        TABLE_HINTS = False
427        LIMIT_FETCH = "LIMIT"
428        RENAME_TABLE_WITH_DB = False
429        ESCAPE_LINE_BREAK = True
430        NVL2_SUPPORTED = False
431
432        TRANSFORMS = {
433            **generator.Generator.TRANSFORMS,
434            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
435            exp.ArraySize: rename_func("ARRAY_LENGTH"),
436            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
437            exp.Create: _create_sql,
438            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
439            exp.DateAdd: _date_add_sql("DATE", "ADD"),
440            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
441            exp.DateFromParts: rename_func("DATE"),
442            exp.DateStrToDate: datestrtodate_sql,
443            exp.DateSub: _date_add_sql("DATE", "SUB"),
444            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
445            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
446            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
447            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
448            exp.GroupConcat: rename_func("STRING_AGG"),
449            exp.Hex: rename_func("TO_HEX"),
450            exp.ILike: no_ilike_sql,
451            exp.IntDiv: rename_func("DIV"),
452            exp.JSONFormat: rename_func("TO_JSON_STRING"),
453            exp.JSONKeyValue: json_keyvalue_comma_sql,
454            exp.Max: max_or_greatest,
455            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
456            exp.MD5Digest: rename_func("MD5"),
457            exp.Min: min_or_least,
458            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
459            exp.RegexpExtract: lambda self, e: self.func(
460                "REGEXP_EXTRACT",
461                e.this,
462                e.expression,
463                e.args.get("position"),
464                e.args.get("occurrence"),
465            ),
466            exp.RegexpReplace: regexp_replace_sql,
467            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
468            exp.ReturnsProperty: _returnsproperty_sql,
469            exp.Select: transforms.preprocess(
470                [
471                    transforms.explode_to_unnest,
472                    _unqualify_unnest,
473                    transforms.eliminate_distinct_on,
474                    _alias_ordered_group,
475                ]
476            ),
477            exp.SHA2: lambda self, e: self.func(
478                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
479            ),
480            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
481            if e.name == "IMMUTABLE"
482            else "NOT DETERMINISTIC",
483            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
484            exp.StrToTime: lambda self, e: self.func(
485                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
486            ),
487            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
488            exp.TimeSub: _date_add_sql("TIME", "SUB"),
489            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
490            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
491            exp.TimeStrToTime: timestrtotime_sql,
492            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
493            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
494            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
495            exp.Unhex: rename_func("FROM_HEX"),
496            exp.Values: _derived_table_values_to_unnest,
497            exp.VariancePop: rename_func("VAR_POP"),
498        }
499
500        TYPE_MAPPING = {
501            **generator.Generator.TYPE_MAPPING,
502            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
503            exp.DataType.Type.BIGINT: "INT64",
504            exp.DataType.Type.BINARY: "BYTES",
505            exp.DataType.Type.BOOLEAN: "BOOL",
506            exp.DataType.Type.CHAR: "STRING",
507            exp.DataType.Type.DECIMAL: "NUMERIC",
508            exp.DataType.Type.DOUBLE: "FLOAT64",
509            exp.DataType.Type.FLOAT: "FLOAT64",
510            exp.DataType.Type.INT: "INT64",
511            exp.DataType.Type.NCHAR: "STRING",
512            exp.DataType.Type.NVARCHAR: "STRING",
513            exp.DataType.Type.SMALLINT: "INT64",
514            exp.DataType.Type.TEXT: "STRING",
515            exp.DataType.Type.TIMESTAMP: "DATETIME",
516            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
517            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
518            exp.DataType.Type.TINYINT: "INT64",
519            exp.DataType.Type.VARBINARY: "BYTES",
520            exp.DataType.Type.VARCHAR: "STRING",
521            exp.DataType.Type.VARIANT: "ANY TYPE",
522        }
523
524        PROPERTIES_LOCATION = {
525            **generator.Generator.PROPERTIES_LOCATION,
526            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
527            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
528        }
529
530        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
531        RESERVED_KEYWORDS = {
532            *generator.Generator.RESERVED_KEYWORDS,
533            "all",
534            "and",
535            "any",
536            "array",
537            "as",
538            "asc",
539            "assert_rows_modified",
540            "at",
541            "between",
542            "by",
543            "case",
544            "cast",
545            "collate",
546            "contains",
547            "create",
548            "cross",
549            "cube",
550            "current",
551            "default",
552            "define",
553            "desc",
554            "distinct",
555            "else",
556            "end",
557            "enum",
558            "escape",
559            "except",
560            "exclude",
561            "exists",
562            "extract",
563            "false",
564            "fetch",
565            "following",
566            "for",
567            "from",
568            "full",
569            "group",
570            "grouping",
571            "groups",
572            "hash",
573            "having",
574            "if",
575            "ignore",
576            "in",
577            "inner",
578            "intersect",
579            "interval",
580            "into",
581            "is",
582            "join",
583            "lateral",
584            "left",
585            "like",
586            "limit",
587            "lookup",
588            "merge",
589            "natural",
590            "new",
591            "no",
592            "not",
593            "null",
594            "nulls",
595            "of",
596            "on",
597            "or",
598            "order",
599            "outer",
600            "over",
601            "partition",
602            "preceding",
603            "proto",
604            "qualify",
605            "range",
606            "recursive",
607            "respect",
608            "right",
609            "rollup",
610            "rows",
611            "select",
612            "set",
613            "some",
614            "struct",
615            "tablesample",
616            "then",
617            "to",
618            "treat",
619            "true",
620            "unbounded",
621            "union",
622            "unnest",
623            "using",
624            "when",
625            "where",
626            "window",
627            "with",
628            "within",
629        }
630
631        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
632            parent = expression.parent
633
634            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
635            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
636            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
637                return self.func(
638                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
639                )
640
641            return super().attimezone_sql(expression)
642
643        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
644            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#json_literals
645            if expression.is_type("json"):
646                return f"JSON {self.sql(expression, 'this')}"
647
648            return super().cast_sql(expression, safe_prefix=safe_prefix)
649
650        def trycast_sql(self, expression: exp.TryCast) -> str:
651            return self.cast_sql(expression, safe_prefix="SAFE_")
652
653        def cte_sql(self, expression: exp.CTE) -> str:
654            if expression.alias_column_names:
655                self.unsupported("Column names in CTE definition are not supported.")
656            return super().cte_sql(expression)
657
658        def array_sql(self, expression: exp.Array) -> str:
659            first_arg = seq_get(expression.expressions, 0)
660            if isinstance(first_arg, exp.Subqueryable):
661                return f"ARRAY{self.wrap(self.sql(first_arg))}"
662
663            return inline_array_sql(self, expression)
664
665        def transaction_sql(self, *_) -> str:
666            return "BEGIN TRANSACTION"
667
668        def commit_sql(self, *_) -> str:
669            return "COMMIT TRANSACTION"
670
671        def rollback_sql(self, *_) -> str:
672            return "ROLLBACK TRANSACTION"
673
674        def in_unnest_op(self, expression: exp.Unnest) -> str:
675            return self.sql(expression)
676
677        def except_op(self, expression: exp.Except) -> str:
678            if not expression.args.get("distinct", False):
679                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
680            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
681
682        def intersect_op(self, expression: exp.Intersect) -> str:
683            if not expression.args.get("distinct", False):
684                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
685            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
686
687        def with_properties(self, properties: exp.Properties) -> str:
688            return self.properties(properties, prefix=self.seg("OPTIONS"))
689
690        def version_sql(self, expression: exp.Version) -> str:
691            if expression.name == "TIMESTAMP":
692                expression = expression.copy()
693                expression.set("this", "SYSTEM_TIME")
694            return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
NVL2_SUPPORTED = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function 
_date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'at', 'else', 'some', 'order', 'union', 'set', 'like', 'groups', 'having', 'define', 'by', 'false', 'no', 'collate', 'in', 'using', 'where', 'range', 'with', 'when', 'and', 'between', 'merge', 'cube', 'qualify', 'lookup', 'join', 'hash', 'right', 'any', 'true', 'proto', 'null', 'window', 'all', 'asc', 'escape', 'limit', 'preceding', 'then', 'from', 'create', 'extract', 'unbounded', 'except', 'or', 'to', 'nulls', 'desc', 'enum', 'fetch', 'natural', 'as', 'recursive', 'respect', 'distinct', 'following', 'group', 'new', 'exists', 'treat', 'ignore', 'within', 'if', 'for', 'select', 'end', 'on', 'outer', 'grouping', 'unnest', 'tablesample', 'full', 'lateral', 'rows', 'cross', 'not', 'is', 'struct', 'assert_rows_modified', 'intersect', 'left', 'exclude', 'cast', 'case', 'inner', 'array', 'contains', 'of', 'current', 'partition', 'interval', 'default', 'rollup', 'into', 'over'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
631        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
632            parent = expression.parent
633
634            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
635            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
636            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
637                return self.func(
638                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
639                )
640
641            return super().attimezone_sql(expression)
def cast_sql(self, expression: sqlglot.expressions.Cast, safe_prefix: Optional[str] = None) -> str:
643        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
644            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#json_literals
645            if expression.is_type("json"):
646                return f"JSON {self.sql(expression, 'this')}"
647
648            return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
650        def trycast_sql(self, expression: exp.TryCast) -> str:
651            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
653        def cte_sql(self, expression: exp.CTE) -> str:
654            if expression.alias_column_names:
655                self.unsupported("Column names in CTE definition are not supported.")
656            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
658        def array_sql(self, expression: exp.Array) -> str:
659            first_arg = seq_get(expression.expressions, 0)
660            if isinstance(first_arg, exp.Subqueryable):
661                return f"ARRAY{self.wrap(self.sql(first_arg))}"
662
663            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
665        def transaction_sql(self, *_) -> str:
666            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
668        def commit_sql(self, *_) -> str:
669            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
671        def rollback_sql(self, *_) -> str:
672            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
674        def in_unnest_op(self, expression: exp.Unnest) -> str:
675            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
677        def except_op(self, expression: exp.Except) -> str:
678            if not expression.args.get("distinct", False):
679                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
680            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
682        def intersect_op(self, expression: exp.Intersect) -> str:
683            if not expression.args.get("distinct", False):
684                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
685            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
687        def with_properties(self, properties: exp.Properties) -> str:
688            return self.properties(properties, prefix=self.seg("OPTIONS"))
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
690        def version_sql(self, expression: exp.Version) -> str:
691            if expression.name == "TIMESTAMP":
692                expression = expression.copy()
693                expression.set("this", "SYSTEM_TIME")
694            return super().version_sql(expression)
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
260    @classmethod
261    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
262        """Checks if text can be identified given an identify option.
263
264        Args:
265            text: The text to check.
266            identify:
267                "always" or `True`: Always returns true.
268                "safe": True if the identifier is case-insensitive.
269
270        Returns:
271            Whether or not the given text can be identified.
272        """
273        if identify is True or identify == "always":
274            return True
275
276        if identify == "safe":
277            return not cls.case_sensitive(text)
278
279        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True — always returns True; "safe" — True if the identifier is case-insensitive.
Returns:

Whether the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_ADD_COLUMN_KEYWORD
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql