sqlglot.dialects.bigquery
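
A minimal usage sketch (the sample queries below are illustrative, not taken from this module): the dialect is selected by passing "bigquery" as the read or write dialect to sqlglot's top-level helpers.

import sqlglot

# Parse BigQuery SQL into sqlglot's dialect-agnostic AST; read="bigquery" routes through
# the BigQuery.Tokenizer and BigQuery.Parser defined below.
expression = sqlglot.parse_one("SELECT DATE_ADD(CURRENT_DATE, INTERVAL 1 DAY)", read="bigquery")

# Render it back out through BigQuery.Generator.
print(expression.sql(dialect="bigquery"))

# transpile() combines both steps; here it simply round-trips BigQuery-specific syntax.
print(sqlglot.transpile("SELECT TO_JSON_STRING(t.col) FROM t", read="bigquery", write="bigquery")[0])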

from __future__ import annotations

import logging
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot._typing import E
from sqlglot.dialects.dialect import (
    Dialect,
    datestrtodate_sql,
    format_time_lambda,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    parse_date_delta_with_interval,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType

logger = logging.getLogger("sqlglot")


def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    def func(self, expression):
        this = self.sql(expression, "this")
        unit = expression.args.get("unit")
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression, unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func


def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
    if not isinstance(expression.unnest().parent, exp.From):
        return self.values_sql(expression)

    alias = expression.args.get("alias")

    structs = [
        exp.Struct(
            expressions=[
                exp.alias_(value, column_name)
                for value, column_name in zip(
                    t.expressions,
                    alias.columns
                    if alias and alias.columns
                    else (f"_c{i}" for i in range(len(t.expressions))),
                )
            ]
        )
        for t in expression.find_all(exp.Tuple)
    ]

    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))


def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)
    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")
        if isinstance(
            expression.expression,
            (
                exp.Subquery,
                exp.Literal,
            ),
        ):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    from sqlglot.optimizer.scope import Scope

    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias:
                for column in Scope(expression).find_all(exp.Column):
                    if column.table == unnest.alias:
                        column.set("table", None)

    return expression


# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            aliases = {
                select.this: select.args["alias"]
                for select in expression.selects
                if isinstance(select, exp.Alias)
            }

            for e in group.expressions:
                alias = aliases.get(e)

                if alias:
                    e.replace(exp.column(alias))

    return expression


def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression


def _parse_timestamp(args: t.List) -> exp.StrToTime:
    this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
    this.set("zone", seq_get(args, 2))
    return this


def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
    return expr_type.from_arg_list(args)


class BigQuery(Dialect):
    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateFromParts: rename_func("DATE"),
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Values: _derived_table_values_to_unnest,
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Create: _create_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))
class BigQuery(sqlglot.dialects.dialect.Dialect):
@classmethod
def normalize_identifier(cls, expression: E) -> E:

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of being quoted or not.
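
A small sketch of this heuristic, with made-up identifiers:

from sqlglot import exp
from sqlglot.dialects.bigquery import BigQuery

# A bare, column-like identifier is lowercased...
print(BigQuery.normalize_identifier(exp.to_identifier("SomeColumn")).name)  # somecolumn

# ...while the name of a qualified table is left alone, since BigQuery table names are
# case-sensitive by default.
table = exp.to_table("MyProject.MyDataset.MyTable")
print(BigQuery.normalize_identifier(table.this).name)  # MyTable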

346                    self._advance(2)
347                    table_name += f"-{self._prev.text}"
348
349                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
350
351            return this
352
353        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
354            table = super()._parse_table_parts(schema=schema)
355            if isinstance(table.this, exp.Identifier) and "." in table.name:
356                catalog, db, this, *rest = (
357                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
358                    for x in split_num_words(table.name, ".", 3)
359                )
360
361                if rest and this:
362                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
363
364                table = exp.Table(this=this, db=db, catalog=catalog)
365
366            return table
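A minimal usage sketch of the two table-name overrides above, assuming sqlglot is installed; the printed values are expectations, not verified output:

    import sqlglot
    from sqlglot import exp

    # _parse_table_part stitches DASH tokens back together, so an unquoted,
    # dash-separated project name survives parsing as a single identifier.
    table = sqlglot.parse_one(
        "SELECT * FROM my-project.dataset.events", read="bigquery"
    ).find(exp.Table)
    print(table.catalog, table.db, table.name)  # expected: my-project dataset events

    # _parse_table_parts splits a single dotted identifier into catalog/db/name.
    table = sqlglot.parse_one(
        "SELECT * FROM `my-project.dataset.events`", read="bigquery"
    ).find(exp.Table)
    print(table.catalog, table.db, table.name)  # expected: my-project dataset events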

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
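As a hedged usage sketch (the exact output strings are assumptions), the FUNCTIONS overrides above let BigQuery-specific spellings parse into sqlglot's canonical expressions and be re-rendered for other dialects:

    import sqlglot

    # REGEXP_CONTAINS is registered as exp.RegexpLike, so another dialect can
    # render the same predicate with its own regexp function.
    print(sqlglot.transpile("SELECT REGEXP_CONTAINS(col, 'ab+')", read="bigquery", write="duckdb")[0])

    # SPLIT falls back to ',' when no delimiter is given, per the lambda above.
    print(sqlglot.parse_one("SELECT SPLIT(col)", read="bigquery").sql(dialect="bigquery"))
    # expected: SELECT SPLIT(col, ',')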
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONObject'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 
'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function BigQuery.Parser.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.ARRAY: 'ARRAY'>, <TokenType.MAP: 'MAP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TABLE: 'TABLE'>}
ID_VAR_TOKENS = {<TokenType.CACHE: 'CACHE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.JSON: 'JSON'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.DESC: 'DESC'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UUID: 'UUID'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FULL: 'FULL'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.MAP: 'MAP'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.SOME: 'SOME'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INET: 'INET'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.SHOW: 'SHOW'>, <TokenType.XML: 'XML'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.LEFT: 'LEFT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.IS: 'IS'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.IF: 'IF'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.INT128: 'INT128'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.VAR: 'VAR'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT: 'INT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CASE: 'CASE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.ASC: 'ASC'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.ALL: 'ALL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.BIT: 'BIT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ANY: 'ANY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, 
<TokenType.END: 'END'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SET: 'SET'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DATE: 'DATE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TIME: 'TIME'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.INT256: 'INT256'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.VALUES: 'VALUES'>, <TokenType.TINYINT: 'TINYINT'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
UNNEST_COLUMN_ONLY: bool = True
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
368    class Generator(generator.Generator):
369        EXPLICIT_UNION = True
370        INTERVAL_ALLOWS_PLURAL_FORM = False
371        JOIN_HINTS = False
372        QUERY_HINTS = False
373        TABLE_HINTS = False
374        LIMIT_FETCH = "LIMIT"
375        RENAME_TABLE_WITH_DB = False
376        ESCAPE_LINE_BREAK = True
377
378        TRANSFORMS = {
379            **generator.Generator.TRANSFORMS,
380            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
381            exp.ArraySize: rename_func("ARRAY_LENGTH"),
382            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
383            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
384            exp.DateAdd: _date_add_sql("DATE", "ADD"),
385            exp.DateFromParts: rename_func("DATE"),
386            exp.DateSub: _date_add_sql("DATE", "SUB"),
387            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
388            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
389            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
390            exp.DateStrToDate: datestrtodate_sql,
391            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
392            exp.JSONFormat: rename_func("TO_JSON_STRING"),
393            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
394            exp.GroupConcat: rename_func("STRING_AGG"),
395            exp.ILike: no_ilike_sql,
396            exp.IntDiv: rename_func("DIV"),
397            exp.Max: max_or_greatest,
398            exp.Min: min_or_least,
399            exp.RegexpExtract: lambda self, e: self.func(
400                "REGEXP_EXTRACT",
401                e.this,
402                e.expression,
403                e.args.get("position"),
404                e.args.get("occurrence"),
405            ),
406            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
407            exp.Select: transforms.preprocess(
408                [
409                    transforms.explode_to_unnest,
410                    _unqualify_unnest,
411                    transforms.eliminate_distinct_on,
412                    _alias_ordered_group,
413                ]
414            ),
415            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
416            exp.StrToTime: lambda self, e: self.func(
417                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
418            ),
419            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
420            exp.TimeSub: _date_add_sql("TIME", "SUB"),
421            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
422            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
423            exp.TimeStrToTime: timestrtotime_sql,
424            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
425            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
426            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
427            exp.VariancePop: rename_func("VAR_POP"),
428            exp.Values: _derived_table_values_to_unnest,
429            exp.ReturnsProperty: _returnsproperty_sql,
430            exp.Create: _create_sql,
431            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
432            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
433            if e.name == "IMMUTABLE"
434            else "NOT DETERMINISTIC",
435        }
436
437        TYPE_MAPPING = {
438            **generator.Generator.TYPE_MAPPING,
439            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
440            exp.DataType.Type.BIGINT: "INT64",
441            exp.DataType.Type.BINARY: "BYTES",
442            exp.DataType.Type.BOOLEAN: "BOOL",
443            exp.DataType.Type.CHAR: "STRING",
444            exp.DataType.Type.DECIMAL: "NUMERIC",
445            exp.DataType.Type.DOUBLE: "FLOAT64",
446            exp.DataType.Type.FLOAT: "FLOAT64",
447            exp.DataType.Type.INT: "INT64",
448            exp.DataType.Type.NCHAR: "STRING",
449            exp.DataType.Type.NVARCHAR: "STRING",
450            exp.DataType.Type.SMALLINT: "INT64",
451            exp.DataType.Type.TEXT: "STRING",
452            exp.DataType.Type.TIMESTAMP: "DATETIME",
453            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
454            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
455            exp.DataType.Type.TINYINT: "INT64",
456            exp.DataType.Type.VARBINARY: "BYTES",
457            exp.DataType.Type.VARCHAR: "STRING",
458            exp.DataType.Type.VARIANT: "ANY TYPE",
459        }
460
461        PROPERTIES_LOCATION = {
462            **generator.Generator.PROPERTIES_LOCATION,
463            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
464            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
465        }
466
467        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
468        RESERVED_KEYWORDS = {
469            *generator.Generator.RESERVED_KEYWORDS,
470            "all",
471            "and",
472            "any",
473            "array",
474            "as",
475            "asc",
476            "assert_rows_modified",
477            "at",
478            "between",
479            "by",
480            "case",
481            "cast",
482            "collate",
483            "contains",
484            "create",
485            "cross",
486            "cube",
487            "current",
488            "default",
489            "define",
490            "desc",
491            "distinct",
492            "else",
493            "end",
494            "enum",
495            "escape",
496            "except",
497            "exclude",
498            "exists",
499            "extract",
500            "false",
501            "fetch",
502            "following",
503            "for",
504            "from",
505            "full",
506            "group",
507            "grouping",
508            "groups",
509            "hash",
510            "having",
511            "if",
512            "ignore",
513            "in",
514            "inner",
515            "intersect",
516            "interval",
517            "into",
518            "is",
519            "join",
520            "lateral",
521            "left",
522            "like",
523            "limit",
524            "lookup",
525            "merge",
526            "natural",
527            "new",
528            "no",
529            "not",
530            "null",
531            "nulls",
532            "of",
533            "on",
534            "or",
535            "order",
536            "outer",
537            "over",
538            "partition",
539            "preceding",
540            "proto",
541            "qualify",
542            "range",
543            "recursive",
544            "respect",
545            "right",
546            "rollup",
547            "rows",
548            "select",
549            "set",
550            "some",
551            "struct",
552            "tablesample",
553            "then",
554            "to",
555            "treat",
556            "true",
557            "unbounded",
558            "union",
559            "unnest",
560            "using",
561            "when",
562            "where",
563            "window",
564            "with",
565            "within",
566        }
567
568        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
569            parent = expression.parent
570
571            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
572            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
573            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
574                return self.func(
575                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
576                )
577
578            return super().attimezone_sql(expression)
579
580        def trycast_sql(self, expression: exp.TryCast) -> str:
581            return self.cast_sql(expression, safe_prefix="SAFE_")
582
583        def cte_sql(self, expression: exp.CTE) -> str:
584            if expression.alias_column_names:
585                self.unsupported("Column names in CTE definition are not supported.")
586            return super().cte_sql(expression)
587
588        def array_sql(self, expression: exp.Array) -> str:
589            first_arg = seq_get(expression.expressions, 0)
590            if isinstance(first_arg, exp.Subqueryable):
591                return f"ARRAY{self.wrap(self.sql(first_arg))}"
592
593            return inline_array_sql(self, expression)
594
595        def transaction_sql(self, *_) -> str:
596            return "BEGIN TRANSACTION"
597
598        def commit_sql(self, *_) -> str:
599            return "COMMIT TRANSACTION"
600
601        def rollback_sql(self, *_) -> str:
602            return "ROLLBACK TRANSACTION"
603
604        def in_unnest_op(self, expression: exp.Unnest) -> str:
605            return self.sql(expression)
606
607        def except_op(self, expression: exp.Except) -> str:
608            if not expression.args.get("distinct", False):
609                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
610            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
611
612        def intersect_op(self, expression: exp.Intersect) -> str:
613            if not expression.args.get("distinct", False):
614                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
615            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
616
617        def with_properties(self, properties: exp.Properties) -> str:
618            return self.properties(properties, prefix=self.seg("OPTIONS"))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are:
      • False (default): Never quote, except in cases where it's mandatory by the dialect.
      • True or 'always': Always quote.
      • 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are:
      • "upper" or True (default): Convert names to uppercase.
      • "lower": Convert names to lowercase.
      • False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
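A small sketch of the TYPE_MAPPING above in action, assuming sqlglot is installed; the expected output is an assumption:

    import sqlglot

    # Generic types are rewritten into BigQuery spellings on the way out.
    print(sqlglot.transpile("CREATE TABLE t (a VARCHAR, b BOOLEAN, c DOUBLE)", write="bigquery")[0])
    # expected: CREATE TABLE t (a STRING, b BOOL, c FLOAT64)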
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, 
<class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'when', 'group', 'null', 'treat', 'then', 'order', 'rollup', 'as', 'some', 'and', 'fetch', 'create', 'end', 'array', 'or', 'on', 'intersect', 'grouping', 'partition', 'from', 'recursive', 'preceding', 'merge', 'having', 'exclude', 'escape', 'set', 'into', 'groups', 'like', 'by', 'lateral', 'over', 'join', 'no', 'else', 'where', 'in', 'between', 'union', 'all', 'cube', 'outer', 'contains', 'distinct', 'unbounded', 'case', 'tablesample', 'window', 'with', 'of', 'if', 'left', 'asc', 'natural', 'ignore', 'desc', 'nulls', 'define', 'new', 'select', 'cross', 'at', 'not', 'any', 'lookup', 'inner', 'following', 'full', 'range', 'is', 'within', 'extract', 'cast', 'exists', 'for', 'qualify', 'false', 'assert_rows_modified', 'to', 'current', 'except', 'proto', 'interval', 'true', 'struct', 'enum', 'unnest', 'hash', 'limit', 'rows', 'using', 'right', 'respect', 'collate', 'default'}
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    parent = expression.parent

    # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
        return self.func(
            "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
        )

    return super().attimezone_sql(expression)
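In other words, a bare AT TIME ZONE (one that is not the FORMAT ... AT TIME ZONE form inside a CAST to STRING) is rewritten through DATETIME/TIMESTAMP. A hedged example, assuming a Postgres-style input; the exact output may vary by version:

    import sqlglot

    sql = "SELECT created_at AT TIME ZONE 'UTC' FROM events"
    print(sqlglot.transpile(sql, read="postgres", write="bigquery")[0])
    # Expected shape: SELECT TIMESTAMP(DATETIME(created_at, 'UTC')) FROM events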
def trycast_sql(self, expression: exp.TryCast) -> str:
    return self.cast_sql(expression, safe_prefix="SAFE_")
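So TRY_CAST from other dialects surfaces as BigQuery's SAFE_CAST. For example (the comment shows the expected shape, not a verbatim fixture):

    import sqlglot

    print(sqlglot.transpile("SELECT TRY_CAST(x AS INT)", write="bigquery")[0])
    # Expected shape: SELECT SAFE_CAST(x AS INT64)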
def cte_sql(self, expression: exp.CTE) -> str:
    if expression.alias_column_names:
        self.unsupported("Column names in CTE definition are not supported.")
    return super().cte_sql(expression)
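Column aliases attached to the CTE itself therefore trigger an "unsupported" message (a warning or an error, depending on the configured unsupported_level). A hedged sketch:

    import sqlglot

    sql = "WITH t(a, b) AS (SELECT 1, 2) SELECT a, b FROM t"
    print(sqlglot.transpile(sql, write="bigquery")[0])
    # Expected shape: WITH t AS (SELECT 1, 2) SELECT a, b FROM t
    # (the column list is dropped and an "unsupported" warning is logged)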
def array_sql(self, expression: exp.Array) -> str:
    first_arg = seq_get(expression.expressions, 0)
    if isinstance(first_arg, exp.Subqueryable):
        return f"ARRAY{self.wrap(self.sql(first_arg))}"

    return inline_array_sql(self, expression)
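A subquery argument keeps the ARRAY(...) function form, while plain values are rendered as an inline array literal. Roughly:

    import sqlglot

    # Subquery argument: the ARRAY(...) form is preserved.
    print(sqlglot.transpile("SELECT ARRAY(SELECT x FROM t)", read="bigquery", write="bigquery")[0])
    # SELECT ARRAY(SELECT x FROM t)

    # Plain values: rendered inline.
    print(sqlglot.transpile("SELECT [1, 2, 3]", read="bigquery", write="bigquery")[0])
    # SELECT [1, 2, 3]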
def transaction_sql(self, *_) -> str:
    return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
    return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
    return "ROLLBACK TRANSACTION"
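Together, these three overrides render transaction control in BigQuery's scripting syntax. A quick check (assuming the default dialect parses the bare statements):

    import sqlglot

    for stmt in ("BEGIN", "COMMIT", "ROLLBACK"):
        print(sqlglot.transpile(stmt, write="bigquery")[0])
    # BEGIN TRANSACTION
    # COMMIT TRANSACTION
    # ROLLBACK TRANSACTION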
def in_unnest_op(self, expression: exp.Unnest) -> str:
    return self.sql(expression)
def except_op(self, expression: exp.Except) -> str:
    if not expression.args.get("distinct", False):
        self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
    return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: exp.Intersect) -> str:
    if not expression.args.get("distinct", False):
        self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
    return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
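Both except_op and intersect_op enforce BigQuery's requirement that set operators carry an explicit DISTINCT; the ALL variants are still emitted but flagged as unsupported. For instance:

    import sqlglot

    # DISTINCT round-trips cleanly.
    print(sqlglot.transpile("SELECT 1 EXCEPT DISTINCT SELECT 2", read="bigquery", write="bigquery")[0])
    # SELECT 1 EXCEPT DISTINCT SELECT 2

    # ALL is passed through but logs "INTERSECT without DISTINCT is not supported in BigQuery".
    print(sqlglot.transpile("SELECT 1 INTERSECT ALL SELECT 2", write="bigquery")[0])
    # SELECT 1 INTERSECT ALL SELECT 2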
def with_properties(self, properties: exp.Properties) -> str:
    return self.properties(properties, prefix=self.seg("OPTIONS"))
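Because of this override, table properties are emitted in an OPTIONS (...) segment instead of the base generator's WITH (...) clause. A hedged sketch; the property name and exact spacing below are illustrative and depend on how the source dialect parses them:

    import sqlglot

    sql = "CREATE TABLE t (x INT) WITH (foo='bar')"
    print(sqlglot.transpile(sql, write="bigquery")[0])
    # Expected shape: CREATE TABLE t (x INT64) OPTIONS (foo='bar')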
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
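UNNEST_COLUMN_ONLY records that a BigQuery UNNEST alias names the produced column rather than a derived table, so the alias below attaches to the column x instead of a table. A quick round-trip:

    import sqlglot

    sql = "SELECT x FROM UNNEST([1, 2, 3]) AS x"
    print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
    # SELECT x FROM UNNEST([1, 2, 3]) AS x  -- the alias names the produced column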
@classmethod
def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
    """Checks if text can be identified given an identify option.

    Args:
        text: The text to check.
        identify:
            "always" or `True`: Always returns true.
            "safe": True if the identifier is case-insensitive.

    Returns:
        Whether or not the given text can be identified.
    """
    if identify is True or identify == "always":
        return True

    if identify == "safe":
        return not cls.case_sensitive(text)

    return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify:
      "always" or True: Always returns true.
      "safe": True if the identifier is case-insensitive.

Returns:
  Whether or not the given text can be identified.
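The behaviour for identify=True/"always" and for unrecognized values follows directly from the source above; "safe" defers to the dialect's case-sensitivity rules:

    from sqlglot.dialects.bigquery import BigQuery

    print(BigQuery.can_identify("Some Column", identify=True))      # True
    print(BigQuery.can_identify("Some Column", identify="always"))  # True
    print(BigQuery.can_identify("Some Column", identify=False))     # False
    # identify="safe" returns True only when the text is case-insensitive for BigQuery.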

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
STRING_ESCAPE = '\\'
IDENTIFIER_ESCAPE = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
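These constants are why generated BigQuery SQL backtick-quotes identifiers, backslash-escapes quotes inside single-quoted strings, and prefixes byte literals with b'...'. A small hedged example:

    import sqlglot

    sql = """SELECT 'it''s' AS "my col" FROM t"""
    print(sqlglot.transpile(sql, read="postgres", write="bigquery")[0])
    # Expected shape: SELECT 'it\'s' AS `my col` FROM t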
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
SELECT_KINDS
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypesize_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql