From beba715b97dd2349e01dde9b077d2535680ebdca Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 10 May 2023 08:44:58 +0200
Subject: Merging upstream version 12.2.0.

Signed-off-by: Daniel Baumann
---
 docs/sqlglot/dialects/postgres.html | 1372 +++++++++++++++++------------------
 1 file changed, 686 insertions(+), 686 deletions(-)

(limited to 'docs/sqlglot/dialects/postgres.html')

diff --git a/docs/sqlglot/dialects/postgres.html b/docs/sqlglot/dialects/postgres.html
index 9eb542b..a322f4e 100644
--- a/docs/sqlglot/dialects/postgres.html
+++ b/docs/sqlglot/dialects/postgres.html
@@ -79,7 +79,7 @@
  1from __future__ import annotations
   2
-  3from sqlglot import exp, generator, parser, tokens
+  3from sqlglot import exp, generator, parser, tokens, transforms
   4from sqlglot.dialects.dialect import (
   5    Dialect,
   6    arrow_json_extract_scalar_sql,
@@ -99,353 +99,352 @@
  20from sqlglot.helper import seq_get
  21from sqlglot.parser import binary_range_parser
  22from sqlglot.tokens import TokenType
- 23from sqlglot.transforms import preprocess, remove_target_from_merge
- 24
- 25DATE_DIFF_FACTOR = {
- 26    "MICROSECOND": " * 1000000",
- 27    "MILLISECOND": " * 1000",
- 28    "SECOND": "",
- 29    "MINUTE": " / 60",
- 30    "HOUR": " / 3600",
- 31    "DAY": " / 86400",
- 32}
+ 23
+ 24DATE_DIFF_FACTOR = {
+ 25    "MICROSECOND": " * 1000000",
+ 26    "MILLISECOND": " * 1000",
+ 27    "SECOND": "",
+ 28    "MINUTE": " / 60",
+ 29    "HOUR": " / 3600",
+ 30    "DAY": " / 86400",
+ 31}
+ 32
  33
- 34
- 35def _date_add_sql(kind):
- 36    def func(self, expression):
- 37        from sqlglot.optimizer.simplify import simplify
- 38
- 39        this = self.sql(expression, "this")
- 40        unit = expression.args.get("unit")
- 41        expression = simplify(expression.args["expression"])
- 42
- 43        if not isinstance(expression, exp.Literal):
- 44            self.unsupported("Cannot add non literal")
- 45
- 46        expression = expression.copy()
- 47        expression.args["is_string"] = True
- 48        return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}"
- 49
- 50    return func
+ 34def _date_add_sql(kind):
+ 35    def func(self, expression):
+ 36        from sqlglot.optimizer.simplify import simplify
+ 37
+ 38        this = self.sql(expression, "this")
+ 39        unit = expression.args.get("unit")
+ 40        expression = simplify(expression.args["expression"])
+ 41
+ 42        if not isinstance(expression, exp.Literal):
+ 43            self.unsupported("Cannot add non literal")
+ 44
+ 45        expression = expression.copy()
+ 46        expression.args["is_string"] = True
+ 47        return f"{this} {kind} {self.sql(exp.Interval(this=expression, unit=unit))}"
+ 48
+ 49    return func
+ 50
  51
- 52
- 53def _date_diff_sql(self, expression):
- 54    unit = expression.text("unit").upper()
- 55    factor = DATE_DIFF_FACTOR.get(unit)
- 56
- 57    end = f"CAST({expression.this} AS TIMESTAMP)"
- 58    start = f"CAST({expression.expression} AS TIMESTAMP)"
- 59
- 60    if factor is not None:
- 61        return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)"
- 62
- 63    age = f"AGE({end}, {start})"
- 64
- 65    if unit == "WEEK":
- 66        unit = f"EXTRACT(year FROM {age}) * 48 + EXTRACT(month FROM {age}) * 4 + EXTRACT(day FROM {age}) / 7"
- 67    elif unit == "MONTH":
- 68        unit = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})"
- 69    elif unit == "QUARTER":
- 70        unit = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3"
- 71    elif unit == "YEAR":
- 72        unit = f"EXTRACT(year FROM {age})"
- 73    else:
- 74        unit = age
- 75
- 76    return f"CAST({unit} AS BIGINT)"
+ 52def _date_diff_sql(self, expression):
+ 53    unit = expression.text("unit").upper()
+ 54    factor = DATE_DIFF_FACTOR.get(unit)
+ 55
+ 56    end = f"CAST({expression.this} AS TIMESTAMP)"
+ 57    start = f"CAST({expression.expression} AS TIMESTAMP)"
+ 58
+ 59    if factor is not None:
+ 60        return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)"
+ 61
+ 62    age = f"AGE({end}, {start})"
+ 63
+ 64    if unit == "WEEK":
+ 65        unit = f"EXTRACT(year FROM {age}) * 48 + EXTRACT(month FROM {age}) * 4 + EXTRACT(day FROM {age}) / 7"
+ 66    elif unit == "MONTH":
+ 67        unit = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})"
+ 68    elif unit == "QUARTER":
+ 69        unit = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3"
+ 70    elif unit == "YEAR":
+ 71        unit = f"EXTRACT(year FROM {age})"
+ 72    else:
+ 73        unit = age
+ 74
+ 75    return f"CAST({unit} AS BIGINT)"
+ 76
  77
- 78
- 79def _substring_sql(self, expression):
- 80    this = self.sql(expression, "this")
- 81    start = self.sql(expression, "start")
- 82    length = self.sql(expression, "length")
- 83
- 84    from_part = f" FROM {start}" if start else ""
- 85    for_part = f" FOR {length}" if length else ""
- 86
- 87    return f"SUBSTRING({this}{from_part}{for_part})"
+ 78def _substring_sql(self, expression):
+ 79    this = self.sql(expression, "this")
+ 80    start = self.sql(expression, "start")
+ 81    length = self.sql(expression, "length")
+ 82
+ 83    from_part = f" FROM {start}" if start else ""
+ 84    for_part = f" FOR {length}" if length else ""
+ 85
+ 86    return f"SUBSTRING({this}{from_part}{for_part})"
+ 87
  88
- 89
- 90def _string_agg_sql(self, expression):
- 91    expression = expression.copy()
- 92    separator = expression.args.get("separator") or exp.Literal.string(",")
- 93
- 94    order = ""
- 95    this = expression.this
- 96    if isinstance(this, exp.Order):
- 97        if this.this:
- 98            this = this.this.pop()
- 99        order = self.sql(expression.this)  # Order has a leading space
-100
-101    return f"STRING_AGG({self.format_args(this, separator)}{order})"
+ 89def _string_agg_sql(self, expression):
+ 90    expression = expression.copy()
+ 91    separator = expression.args.get("separator") or exp.Literal.string(",")
+ 92
+ 93    order = ""
+ 94    this = expression.this
+ 95    if isinstance(this, exp.Order):
+ 96        if this.this:
+ 97            this = this.this.pop()
+ 98        order = self.sql(expression.this)  # Order has a leading space
+ 99
+100    return f"STRING_AGG({self.format_args(this, separator)}{order})"
+101
 102
-103
-104def _datatype_sql(self, expression):
-105    if expression.this == exp.DataType.Type.ARRAY:
-106        return f"{self.expressions(expression, flat=True)}[]"
-107    return self.datatype_sql(expression)
+103def _datatype_sql(self, expression):
+104    if expression.this == exp.DataType.Type.ARRAY:
+105        return f"{self.expressions(expression, flat=True)}[]"
+106    return self.datatype_sql(expression)
+107
 108
-109
-110def _auto_increment_to_serial(expression):
-111    auto = expression.find(exp.AutoIncrementColumnConstraint)
-112
-113    if auto:
-114        expression = expression.copy()
-115        expression.args["constraints"].remove(auto.parent)
-116        kind = expression.args["kind"]
-117
-118        if kind.this == exp.DataType.Type.INT:
-119            kind.replace(exp.DataType(this=exp.DataType.Type.SERIAL))
-120        elif kind.this == exp.DataType.Type.SMALLINT:
-121            kind.replace(exp.DataType(this=exp.DataType.Type.SMALLSERIAL))
-122        elif kind.this == exp.DataType.Type.BIGINT:
-123            kind.replace(exp.DataType(this=exp.DataType.Type.BIGSERIAL))
-124
-125    return expression
+109def _auto_increment_to_serial(expression):
+110    auto = expression.find(exp.AutoIncrementColumnConstraint)
+111
+112    if auto:
+113        expression = expression.copy()
+114        expression.args["constraints"].remove(auto.parent)
+115        kind = expression.args["kind"]
+116
+117        if kind.this == exp.DataType.Type.INT:
+118            kind.replace(exp.DataType(this=exp.DataType.Type.SERIAL))
+119        elif kind.this == exp.DataType.Type.SMALLINT:
+120            kind.replace(exp.DataType(this=exp.DataType.Type.SMALLSERIAL))
+121        elif kind.this == exp.DataType.Type.BIGINT:
+122            kind.replace(exp.DataType(this=exp.DataType.Type.BIGSERIAL))
+123
+124    return expression
+125
 126
-127
-128def _serial_to_generated(expression):
-129    kind = expression.args["kind"]
-130
-131    if kind.this == exp.DataType.Type.SERIAL:
-132        data_type = exp.DataType(this=exp.DataType.Type.INT)
-133    elif kind.this == exp.DataType.Type.SMALLSERIAL:
-134        data_type = exp.DataType(this=exp.DataType.Type.SMALLINT)
-135    elif kind.this == exp.DataType.Type.BIGSERIAL:
-136        data_type = exp.DataType(this=exp.DataType.Type.BIGINT)
-137    else:
-138        data_type = None
-139
-140    if data_type:
-141        expression = expression.copy()
-142        expression.args["kind"].replace(data_type)
-143        constraints = expression.args["constraints"]
-144        generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False))
-145        notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint())
-146        if notnull not in constraints:
-147            constraints.insert(0, notnull)
-148        if generated not in constraints:
-149            constraints.insert(0, generated)
-150
-151    return expression
+127def _serial_to_generated(expression):
+128    kind = expression.args["kind"]
+129
+130    if kind.this == exp.DataType.Type.SERIAL:
+131        data_type = exp.DataType(this=exp.DataType.Type.INT)
+132    elif kind.this == exp.DataType.Type.SMALLSERIAL:
+133        data_type = exp.DataType(this=exp.DataType.Type.SMALLINT)
+134    elif kind.this == exp.DataType.Type.BIGSERIAL:
+135        data_type = exp.DataType(this=exp.DataType.Type.BIGINT)
+136    else:
+137        data_type = None
+138
+139    if data_type:
+140        expression = expression.copy()
+141        expression.args["kind"].replace(data_type)
+142        constraints = expression.args["constraints"]
+143        generated = exp.ColumnConstraint(kind=exp.GeneratedAsIdentityColumnConstraint(this=False))
+144        notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint())
+145        if notnull not in constraints:
+146            constraints.insert(0, notnull)
+147        if generated not in constraints:
+148            constraints.insert(0, generated)
+149
+150    return expression
+151
 152
-153
-154def _generate_series(args):
-155    # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day
-156    step = seq_get(args, 2)
-157
-158    if step is None:
-159        # Postgres allows calls with just two arguments -- the "step" argument defaults to 1
-160        return exp.GenerateSeries.from_arg_list(args)
-161
-162    if step.is_string:
-163        args[2] = exp.to_interval(step.this)
-164    elif isinstance(step, exp.Interval) and not step.args.get("unit"):
-165        args[2] = exp.to_interval(step.this.this)
-166
-167    return exp.GenerateSeries.from_arg_list(args)
+153def _generate_series(args):
+154    # The goal is to convert step values like '1 day' or INTERVAL '1 day' into INTERVAL '1' day
+155    step = seq_get(args, 2)
+156
+157    if step is None:
+158        # Postgres allows calls with just two arguments -- the "step" argument defaults to 1
+159        return exp.GenerateSeries.from_arg_list(args)
+160
+161    if step.is_string:
+162        args[2] = exp.to_interval(step.this)
+163    elif isinstance(step, exp.Interval) and not step.args.get("unit"):
+164        args[2] = exp.to_interval(step.this.this)
+165
+166    return exp.GenerateSeries.from_arg_list(args)
+167
 168
-169
-170def _to_timestamp(args):
-171    # TO_TIMESTAMP accepts either a single double argument or (text, text)
-172    if len(args) == 1:
-173        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE
-174        return exp.UnixToTime.from_arg_list(args)
-175    # https://www.postgresql.org/docs/current/functions-formatting.html
-176    return format_time_lambda(exp.StrToTime, "postgres")(args)
+169def _to_timestamp(args):
+170    # TO_TIMESTAMP accepts either a single double argument or (text, text)
+171    if len(args) == 1:
+172        # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE
+173        return exp.UnixToTime.from_arg_list(args)
+174    # https://www.postgresql.org/docs/current/functions-formatting.html
+175    return format_time_lambda(exp.StrToTime, "postgres")(args)
+176
 177
-178
-179class Postgres(Dialect):
-180    null_ordering = "nulls_are_large"
-181    time_format = "'YYYY-MM-DD HH24:MI:SS'"
-182    time_mapping = {
-183        "AM": "%p",
-184        "PM": "%p",
-185        "D": "%u",  # 1-based day of week
-186        "DD": "%d",  # day of month
-187        "DDD": "%j",  # zero padded day of year
-188        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
-189        "FMDDD": "%-j",  # day of year
-190        "FMHH12": "%-I",  # 9
-191        "FMHH24": "%-H",  # 9
-192        "FMMI": "%-M",  # Minute
-193        "FMMM": "%-m",  # 1
-194        "FMSS": "%-S",  # Second
-195        "HH12": "%I",  # 09
-196        "HH24": "%H",  # 09
-197        "MI": "%M",  # zero padded minute
-198        "MM": "%m",  # 01
-199        "OF": "%z",  # utc offset
-200        "SS": "%S",  # zero padded second
-201        "TMDay": "%A",  # TM is locale dependent
-202        "TMDy": "%a",
-203        "TMMon": "%b",  # Sep
-204        "TMMonth": "%B",  # September
-205        "TZ": "%Z",  # uppercase timezone name
-206        "US": "%f",  # zero padded microsecond
-207        "WW": "%U",  # 1-based week of year
-208        "YY": "%y",  # 15
-209        "YYYY": "%Y",  # 2015
-210    }
-211
-212    class Tokenizer(tokens.Tokenizer):
-213        QUOTES = ["'", "$$"]
-214
-215        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
-216        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
-217        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
-218
-219        KEYWORDS = {
-220            **tokens.Tokenizer.KEYWORDS,
-221            "~~": TokenType.LIKE,
-222            "~~*": TokenType.ILIKE,
-223            "~*": TokenType.IRLIKE,
-224            "~": TokenType.RLIKE,
-225            "@>": TokenType.AT_GT,
-226            "<@": TokenType.LT_AT,
-227            "BEGIN": TokenType.COMMAND,
-228            "BEGIN TRANSACTION": TokenType.BEGIN,
-229            "BIGSERIAL": TokenType.BIGSERIAL,
-230            "CHARACTER VARYING": TokenType.VARCHAR,
-231            "DECLARE": TokenType.COMMAND,
-232            "DO": TokenType.COMMAND,
-233            "HSTORE": TokenType.HSTORE,
-234            "JSONB": TokenType.JSONB,
-235            "REFRESH": TokenType.COMMAND,
-236            "REINDEX": TokenType.COMMAND,
-237            "RESET": TokenType.COMMAND,
-238            "RETURNING": TokenType.RETURNING,
-239            "REVOKE": TokenType.COMMAND,
-240            "SERIAL": TokenType.SERIAL,
-241            "SMALLSERIAL": TokenType.SMALLSERIAL,
-242            "TEMP": TokenType.TEMPORARY,
-243            "CSTRING": TokenType.PSEUDO_TYPE,
-244        }
-245
-246        SINGLE_TOKENS = {
-247            **tokens.Tokenizer.SINGLE_TOKENS,
-248            "$": TokenType.PARAMETER,
-249        }
-250
-251        VAR_SINGLE_TOKENS = {"$"}
-252
-253    class Parser(parser.Parser):
-254        STRICT_CAST = False
-255
-256        FUNCTIONS = {
-257            **parser.Parser.FUNCTIONS,  # type: ignore
-258            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
-259                this=seq_get(args, 1), unit=seq_get(args, 0)
-260            ),
-261            "GENERATE_SERIES": _generate_series,
-262            "NOW": exp.CurrentTimestamp.from_arg_list,
-263            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
-264            "TO_TIMESTAMP": _to_timestamp,
-265        }
-266
-267        FUNCTION_PARSERS = {
-268            **parser.Parser.FUNCTION_PARSERS,
-269            "DATE_PART": lambda self: self._parse_date_part(),
-270        }
-271
-272        BITWISE = {
-273            **parser.Parser.BITWISE,  # type: ignore
-274            TokenType.HASH: exp.BitwiseXor,
-275        }
-276
-277        FACTOR = {
-278            **parser.Parser.FACTOR,
-279            TokenType.CARET: exp.Pow,
-280        }
-281
-282        RANGE_PARSERS = {
-283            **parser.Parser.RANGE_PARSERS,  # type: ignore
-284            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
-285            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
-286            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
-287        }
-288
-289        def _parse_date_part(self) -> exp.Expression:
-290            part = self._parse_type()
-291            self._match(TokenType.COMMA)
-292            value = self._parse_bitwise()
-293
-294            if part and part.is_string:
-295                part = exp.Var(this=part.name)
-296
-297            return self.expression(exp.Extract, this=part, expression=value)
-298
-299    class Generator(generator.Generator):
-300        INTERVAL_ALLOWS_PLURAL_FORM = False
-301        LOCKING_READS_SUPPORTED = True
-302        JOIN_HINTS = False
-303        TABLE_HINTS = False
-304        PARAMETER_TOKEN = "$"
-305
-306        TYPE_MAPPING = {
-307            **generator.Generator.TYPE_MAPPING,  # type: ignore
-308            exp.DataType.Type.TINYINT: "SMALLINT",
-309            exp.DataType.Type.FLOAT: "REAL",
-310            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
-311            exp.DataType.Type.BINARY: "BYTEA",
-312            exp.DataType.Type.VARBINARY: "BYTEA",
-313            exp.DataType.Type.DATETIME: "TIMESTAMP",
-314        }
-315
-316        TRANSFORMS = {
-317            **generator.Generator.TRANSFORMS,  # type: ignore
-318            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
-319            exp.ColumnDef: preprocess(
-320                [
-321                    _auto_increment_to_serial,
-322                    _serial_to_generated,
-323                ],
-324            ),
-325            exp.JSONExtract: arrow_json_extract_sql,
-326            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
-327            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
-328            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
-329            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
-330            exp.Pow: lambda self, e: self.binary(e, "^"),
-331            exp.CurrentDate: no_paren_current_date_sql,
-332            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
-333            exp.DateAdd: _date_add_sql("+"),
-334            exp.DateStrToDate: datestrtodate_sql,
-335            exp.DateSub: _date_add_sql("-"),
-336            exp.DateDiff: _date_diff_sql,
-337            exp.LogicalOr: rename_func("BOOL_OR"),
-338            exp.LogicalAnd: rename_func("BOOL_AND"),
-339            exp.Max: max_or_greatest,
-340            exp.Min: min_or_least,
-341            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
-342            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
-343            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
-344            exp.Merge: preprocess([remove_target_from_merge]),
-345            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
-346            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
-347            exp.StrPosition: str_position_sql,
-348            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
-349            exp.Substring: _substring_sql,
-350            exp.TimestampTrunc: timestamptrunc_sql,
-351            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
-352            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
-353            exp.TableSample: no_tablesample_sql,
-354            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
-355            exp.Trim: trim_sql,
-356            exp.TryCast: no_trycast_sql,
-357            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
-358            exp.DataType: _datatype_sql,
-359            exp.GroupConcat: _string_agg_sql,
-360            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
-361            if isinstance(seq_get(e.expressions, 0), exp.Select)
-362            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
-363        }
-364
-365        PROPERTIES_LOCATION = {
-366            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
-367            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
-368            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-369        }
+178class Postgres(Dialect):
+179    null_ordering = "nulls_are_large"
+180    time_format = "'YYYY-MM-DD HH24:MI:SS'"
+181    time_mapping = {
+182        "AM": "%p",
+183        "PM": "%p",
+184        "D": "%u",  # 1-based day of week
+185        "DD": "%d",  # day of month
+186        "DDD": "%j",  # zero padded day of year
+187        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
+188        "FMDDD": "%-j",  # day of year
+189        "FMHH12": "%-I",  # 9
+190        "FMHH24": "%-H",  # 9
+191        "FMMI": "%-M",  # Minute
+192        "FMMM": "%-m",  # 1
+193        "FMSS": "%-S",  # Second
+194        "HH12": "%I",  # 09
+195        "HH24": "%H",  # 09
+196        "MI": "%M",  # zero padded minute
+197        "MM": "%m",  # 01
+198        "OF": "%z",  # utc offset
+199        "SS": "%S",  # zero padded second
+200        "TMDay": "%A",  # TM is locale dependent
+201        "TMDy": "%a",
+202        "TMMon": "%b",  # Sep
+203        "TMMonth": "%B",  # September
+204        "TZ": "%Z",  # uppercase timezone name
+205        "US": "%f",  # zero padded microsecond
+206        "WW": "%U",  # 1-based week of year
+207        "YY": "%y",  # 15
+208        "YYYY": "%Y",  # 2015
+209    }
+210
+211    class Tokenizer(tokens.Tokenizer):
+212        QUOTES = ["'", "$$"]
+213
+214        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+215        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+216        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+217
+218        KEYWORDS = {
+219            **tokens.Tokenizer.KEYWORDS,
+220            "~~": TokenType.LIKE,
+221            "~~*": TokenType.ILIKE,
+222            "~*": TokenType.IRLIKE,
+223            "~": TokenType.RLIKE,
+224            "@>": TokenType.AT_GT,
+225            "<@": TokenType.LT_AT,
+226            "BEGIN": TokenType.COMMAND,
+227            "BEGIN TRANSACTION": TokenType.BEGIN,
+228            "BIGSERIAL": TokenType.BIGSERIAL,
+229            "CHARACTER VARYING": TokenType.VARCHAR,
+230            "DECLARE": TokenType.COMMAND,
+231            "DO": TokenType.COMMAND,
+232            "HSTORE": TokenType.HSTORE,
+233            "JSONB": TokenType.JSONB,
+234            "REFRESH": TokenType.COMMAND,
+235            "REINDEX": TokenType.COMMAND,
+236            "RESET": TokenType.COMMAND,
+237            "RETURNING": TokenType.RETURNING,
+238            "REVOKE": TokenType.COMMAND,
+239            "SERIAL": TokenType.SERIAL,
+240            "SMALLSERIAL": TokenType.SMALLSERIAL,
+241            "TEMP": TokenType.TEMPORARY,
+242            "CSTRING": TokenType.PSEUDO_TYPE,
+243        }
+244
+245        SINGLE_TOKENS = {
+246            **tokens.Tokenizer.SINGLE_TOKENS,
+247            "$": TokenType.PARAMETER,
+248        }
+249
+250        VAR_SINGLE_TOKENS = {"$"}
+251
+252    class Parser(parser.Parser):
+253        STRICT_CAST = False
+254
+255        FUNCTIONS = {
+256            **parser.Parser.FUNCTIONS,  # type: ignore
+257            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
+258                this=seq_get(args, 1), unit=seq_get(args, 0)
+259            ),
+260            "GENERATE_SERIES": _generate_series,
+261            "NOW": exp.CurrentTimestamp.from_arg_list,
+262            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+263            "TO_TIMESTAMP": _to_timestamp,
+264        }
+265
+266        FUNCTION_PARSERS = {
+267            **parser.Parser.FUNCTION_PARSERS,
+268            "DATE_PART": lambda self: self._parse_date_part(),
+269        }
+270
+271        BITWISE = {
+272            **parser.Parser.BITWISE,  # type: ignore
+273            TokenType.HASH: exp.BitwiseXor,
+274        }
+275
+276        FACTOR = {
+277            **parser.Parser.FACTOR,
+278            TokenType.CARET: exp.Pow,
+279        }
+280
+281        RANGE_PARSERS = {
+282            **parser.Parser.RANGE_PARSERS,  # type: ignore
+283            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
+284            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
+285            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
+286        }
+287
+288        def _parse_date_part(self) -> exp.Expression:
+289            part = self._parse_type()
+290            self._match(TokenType.COMMA)
+291            value = self._parse_bitwise()
+292
+293            if part and part.is_string:
+294                part = exp.Var(this=part.name)
+295
+296            return self.expression(exp.Extract, this=part, expression=value)
+297
+298    class Generator(generator.Generator):
+299        INTERVAL_ALLOWS_PLURAL_FORM = False
+300        LOCKING_READS_SUPPORTED = True
+301        JOIN_HINTS = False
+302        TABLE_HINTS = False
+303        PARAMETER_TOKEN = "$"
+304
+305        TYPE_MAPPING = {
+306            **generator.Generator.TYPE_MAPPING,  # type: ignore
+307            exp.DataType.Type.TINYINT: "SMALLINT",
+308            exp.DataType.Type.FLOAT: "REAL",
+309            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+310            exp.DataType.Type.BINARY: "BYTEA",
+311            exp.DataType.Type.VARBINARY: "BYTEA",
+312            exp.DataType.Type.DATETIME: "TIMESTAMP",
+313        }
+314
+315        TRANSFORMS = {
+316            **generator.Generator.TRANSFORMS,  # type: ignore
+317            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+318            exp.ColumnDef: transforms.preprocess(
+319                [
+320                    _auto_increment_to_serial,
+321                    _serial_to_generated,
+322                ],
+323            ),
+324            exp.JSONExtract: arrow_json_extract_sql,
+325            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+326            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+327            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+328            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+329            exp.Pow: lambda self, e: self.binary(e, "^"),
+330            exp.CurrentDate: no_paren_current_date_sql,
+331            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+332            exp.DateAdd: _date_add_sql("+"),
+333            exp.DateStrToDate: datestrtodate_sql,
+334            exp.DateSub: _date_add_sql("-"),
+335            exp.DateDiff: _date_diff_sql,
+336            exp.LogicalOr: rename_func("BOOL_OR"),
+337            exp.LogicalAnd: rename_func("BOOL_AND"),
+338            exp.Max: max_or_greatest,
+339            exp.Min: min_or_least,
+340            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
+341            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
+342            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
+343            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
+344            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+345            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+346            exp.StrPosition: str_position_sql,
+347            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+348            exp.Substring: _substring_sql,
+349            exp.TimestampTrunc: timestamptrunc_sql,
+350            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+351            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+352            exp.TableSample: no_tablesample_sql,
+353            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+354            exp.Trim: trim_sql,
+355            exp.TryCast: no_trycast_sql,
+356            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+357            exp.DataType: _datatype_sql,
+358            exp.GroupConcat: _string_agg_sql,
+359            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+360            if isinstance(seq_get(e.expressions, 0), exp.Select)
+361            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+362        }
+363
+364        PROPERTIES_LOCATION = {
+365            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+366            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
+367            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+368        }
 
@@ -461,197 +460,197 @@
-180class Postgres(Dialect):
-181    null_ordering = "nulls_are_large"
-182    time_format = "'YYYY-MM-DD HH24:MI:SS'"
-183    time_mapping = {
-184        "AM": "%p",
-185        "PM": "%p",
-186        "D": "%u",  # 1-based day of week
-187        "DD": "%d",  # day of month
-188        "DDD": "%j",  # zero padded day of year
-189        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
-190        "FMDDD": "%-j",  # day of year
-191        "FMHH12": "%-I",  # 9
-192        "FMHH24": "%-H",  # 9
-193        "FMMI": "%-M",  # Minute
-194        "FMMM": "%-m",  # 1
-195        "FMSS": "%-S",  # Second
-196        "HH12": "%I",  # 09
-197        "HH24": "%H",  # 09
-198        "MI": "%M",  # zero padded minute
-199        "MM": "%m",  # 01
-200        "OF": "%z",  # utc offset
-201        "SS": "%S",  # zero padded second
-202        "TMDay": "%A",  # TM is locale dependent
-203        "TMDy": "%a",
-204        "TMMon": "%b",  # Sep
-205        "TMMonth": "%B",  # September
-206        "TZ": "%Z",  # uppercase timezone name
-207        "US": "%f",  # zero padded microsecond
-208        "WW": "%U",  # 1-based week of year
-209        "YY": "%y",  # 15
-210        "YYYY": "%Y",  # 2015
-211    }
-212
-213    class Tokenizer(tokens.Tokenizer):
-214        QUOTES = ["'", "$$"]
-215
-216        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
-217        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
-218        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
-219
-220        KEYWORDS = {
-221            **tokens.Tokenizer.KEYWORDS,
-222            "~~": TokenType.LIKE,
-223            "~~*": TokenType.ILIKE,
-224            "~*": TokenType.IRLIKE,
-225            "~": TokenType.RLIKE,
-226            "@>": TokenType.AT_GT,
-227            "<@": TokenType.LT_AT,
-228            "BEGIN": TokenType.COMMAND,
-229            "BEGIN TRANSACTION": TokenType.BEGIN,
-230            "BIGSERIAL": TokenType.BIGSERIAL,
-231            "CHARACTER VARYING": TokenType.VARCHAR,
-232            "DECLARE": TokenType.COMMAND,
-233            "DO": TokenType.COMMAND,
-234            "HSTORE": TokenType.HSTORE,
-235            "JSONB": TokenType.JSONB,
-236            "REFRESH": TokenType.COMMAND,
-237            "REINDEX": TokenType.COMMAND,
-238            "RESET": TokenType.COMMAND,
-239            "RETURNING": TokenType.RETURNING,
-240            "REVOKE": TokenType.COMMAND,
-241            "SERIAL": TokenType.SERIAL,
-242            "SMALLSERIAL": TokenType.SMALLSERIAL,
-243            "TEMP": TokenType.TEMPORARY,
-244            "CSTRING": TokenType.PSEUDO_TYPE,
-245        }
-246
-247        SINGLE_TOKENS = {
-248            **tokens.Tokenizer.SINGLE_TOKENS,
-249            "$": TokenType.PARAMETER,
-250        }
-251
-252        VAR_SINGLE_TOKENS = {"$"}
-253
-254    class Parser(parser.Parser):
-255        STRICT_CAST = False
-256
-257        FUNCTIONS = {
-258            **parser.Parser.FUNCTIONS,  # type: ignore
-259            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
-260                this=seq_get(args, 1), unit=seq_get(args, 0)
-261            ),
-262            "GENERATE_SERIES": _generate_series,
-263            "NOW": exp.CurrentTimestamp.from_arg_list,
-264            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
-265            "TO_TIMESTAMP": _to_timestamp,
-266        }
-267
-268        FUNCTION_PARSERS = {
-269            **parser.Parser.FUNCTION_PARSERS,
-270            "DATE_PART": lambda self: self._parse_date_part(),
-271        }
-272
-273        BITWISE = {
-274            **parser.Parser.BITWISE,  # type: ignore
-275            TokenType.HASH: exp.BitwiseXor,
-276        }
-277
-278        FACTOR = {
-279            **parser.Parser.FACTOR,
-280            TokenType.CARET: exp.Pow,
-281        }
-282
-283        RANGE_PARSERS = {
-284            **parser.Parser.RANGE_PARSERS,  # type: ignore
-285            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
-286            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
-287            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
-288        }
-289
-290        def _parse_date_part(self) -> exp.Expression:
-291            part = self._parse_type()
-292            self._match(TokenType.COMMA)
-293            value = self._parse_bitwise()
-294
-295            if part and part.is_string:
-296                part = exp.Var(this=part.name)
-297
-298            return self.expression(exp.Extract, this=part, expression=value)
-299
-300    class Generator(generator.Generator):
-301        INTERVAL_ALLOWS_PLURAL_FORM = False
-302        LOCKING_READS_SUPPORTED = True
-303        JOIN_HINTS = False
-304        TABLE_HINTS = False
-305        PARAMETER_TOKEN = "$"
-306
-307        TYPE_MAPPING = {
-308            **generator.Generator.TYPE_MAPPING,  # type: ignore
-309            exp.DataType.Type.TINYINT: "SMALLINT",
-310            exp.DataType.Type.FLOAT: "REAL",
-311            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
-312            exp.DataType.Type.BINARY: "BYTEA",
-313            exp.DataType.Type.VARBINARY: "BYTEA",
-314            exp.DataType.Type.DATETIME: "TIMESTAMP",
-315        }
-316
-317        TRANSFORMS = {
-318            **generator.Generator.TRANSFORMS,  # type: ignore
-319            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
-320            exp.ColumnDef: preprocess(
-321                [
-322                    _auto_increment_to_serial,
-323                    _serial_to_generated,
-324                ],
-325            ),
-326            exp.JSONExtract: arrow_json_extract_sql,
-327            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
-328            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
-329            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
-330            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
-331            exp.Pow: lambda self, e: self.binary(e, "^"),
-332            exp.CurrentDate: no_paren_current_date_sql,
-333            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
-334            exp.DateAdd: _date_add_sql("+"),
-335            exp.DateStrToDate: datestrtodate_sql,
-336            exp.DateSub: _date_add_sql("-"),
-337            exp.DateDiff: _date_diff_sql,
-338            exp.LogicalOr: rename_func("BOOL_OR"),
-339            exp.LogicalAnd: rename_func("BOOL_AND"),
-340            exp.Max: max_or_greatest,
-341            exp.Min: min_or_least,
-342            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
-343            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
-344            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
-345            exp.Merge: preprocess([remove_target_from_merge]),
-346            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
-347            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
-348            exp.StrPosition: str_position_sql,
-349            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
-350            exp.Substring: _substring_sql,
-351            exp.TimestampTrunc: timestamptrunc_sql,
-352            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
-353            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
-354            exp.TableSample: no_tablesample_sql,
-355            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
-356            exp.Trim: trim_sql,
-357            exp.TryCast: no_trycast_sql,
-358            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
-359            exp.DataType: _datatype_sql,
-360            exp.GroupConcat: _string_agg_sql,
-361            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
-362            if isinstance(seq_get(e.expressions, 0), exp.Select)
-363            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
-364        }
-365
-366        PROPERTIES_LOCATION = {
-367            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
-368            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
-369            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-370        }
+            179class Postgres(Dialect):
+180    null_ordering = "nulls_are_large"
+181    time_format = "'YYYY-MM-DD HH24:MI:SS'"
+182    time_mapping = {
+183        "AM": "%p",
+184        "PM": "%p",
+185        "D": "%u",  # 1-based day of week
+186        "DD": "%d",  # day of month
+187        "DDD": "%j",  # zero padded day of year
+188        "FMDD": "%-d",  # - is no leading zero for Python; same for FM in postgres
+189        "FMDDD": "%-j",  # day of year
+190        "FMHH12": "%-I",  # 9
+191        "FMHH24": "%-H",  # 9
+192        "FMMI": "%-M",  # Minute
+193        "FMMM": "%-m",  # 1
+194        "FMSS": "%-S",  # Second
+195        "HH12": "%I",  # 09
+196        "HH24": "%H",  # 09
+197        "MI": "%M",  # zero padded minute
+198        "MM": "%m",  # 01
+199        "OF": "%z",  # utc offset
+200        "SS": "%S",  # zero padded second
+201        "TMDay": "%A",  # TM is locale dependent
+202        "TMDy": "%a",
+203        "TMMon": "%b",  # Sep
+204        "TMMonth": "%B",  # September
+205        "TZ": "%Z",  # uppercase timezone name
+206        "US": "%f",  # zero padded microsecond
+207        "WW": "%U",  # 1-based week of year
+208        "YY": "%y",  # 15
+209        "YYYY": "%Y",  # 2015
+210    }
+211
+212    class Tokenizer(tokens.Tokenizer):
+213        QUOTES = ["'", "$$"]
+214
+215        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+216        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+217        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+218
+219        KEYWORDS = {
+220            **tokens.Tokenizer.KEYWORDS,
+221            "~~": TokenType.LIKE,
+222            "~~*": TokenType.ILIKE,
+223            "~*": TokenType.IRLIKE,
+224            "~": TokenType.RLIKE,
+225            "@>": TokenType.AT_GT,
+226            "<@": TokenType.LT_AT,
+227            "BEGIN": TokenType.COMMAND,
+228            "BEGIN TRANSACTION": TokenType.BEGIN,
+229            "BIGSERIAL": TokenType.BIGSERIAL,
+230            "CHARACTER VARYING": TokenType.VARCHAR,
+231            "DECLARE": TokenType.COMMAND,
+232            "DO": TokenType.COMMAND,
+233            "HSTORE": TokenType.HSTORE,
+234            "JSONB": TokenType.JSONB,
+235            "REFRESH": TokenType.COMMAND,
+236            "REINDEX": TokenType.COMMAND,
+237            "RESET": TokenType.COMMAND,
+238            "RETURNING": TokenType.RETURNING,
+239            "REVOKE": TokenType.COMMAND,
+240            "SERIAL": TokenType.SERIAL,
+241            "SMALLSERIAL": TokenType.SMALLSERIAL,
+242            "TEMP": TokenType.TEMPORARY,
+243            "CSTRING": TokenType.PSEUDO_TYPE,
+244        }
+245
+246        SINGLE_TOKENS = {
+247            **tokens.Tokenizer.SINGLE_TOKENS,
+248            "$": TokenType.PARAMETER,
+249        }
+250
+251        VAR_SINGLE_TOKENS = {"$"}
+252
+253    class Parser(parser.Parser):
+254        STRICT_CAST = False
+255
+256        FUNCTIONS = {
+257            **parser.Parser.FUNCTIONS,  # type: ignore
+258            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
+259                this=seq_get(args, 1), unit=seq_get(args, 0)
+260            ),
+261            "GENERATE_SERIES": _generate_series,
+262            "NOW": exp.CurrentTimestamp.from_arg_list,
+263            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+264            "TO_TIMESTAMP": _to_timestamp,
+265        }
+266
+267        FUNCTION_PARSERS = {
+268            **parser.Parser.FUNCTION_PARSERS,
+269            "DATE_PART": lambda self: self._parse_date_part(),
+270        }
+271
+272        BITWISE = {
+273            **parser.Parser.BITWISE,  # type: ignore
+274            TokenType.HASH: exp.BitwiseXor,
+275        }
+276
+277        FACTOR = {
+278            **parser.Parser.FACTOR,
+279            TokenType.CARET: exp.Pow,
+280        }
+281
+282        RANGE_PARSERS = {
+283            **parser.Parser.RANGE_PARSERS,  # type: ignore
+284            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
+285            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
+286            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
+287        }
+288
+289        def _parse_date_part(self) -> exp.Expression:
+290            part = self._parse_type()
+291            self._match(TokenType.COMMA)
+292            value = self._parse_bitwise()
+293
+294            if part and part.is_string:
+295                part = exp.Var(this=part.name)
+296
+297            return self.expression(exp.Extract, this=part, expression=value)
+298
+299    class Generator(generator.Generator):
+300        INTERVAL_ALLOWS_PLURAL_FORM = False
+301        LOCKING_READS_SUPPORTED = True
+302        JOIN_HINTS = False
+303        TABLE_HINTS = False
+304        PARAMETER_TOKEN = "$"
+305
+306        TYPE_MAPPING = {
+307            **generator.Generator.TYPE_MAPPING,  # type: ignore
+308            exp.DataType.Type.TINYINT: "SMALLINT",
+309            exp.DataType.Type.FLOAT: "REAL",
+310            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+311            exp.DataType.Type.BINARY: "BYTEA",
+312            exp.DataType.Type.VARBINARY: "BYTEA",
+313            exp.DataType.Type.DATETIME: "TIMESTAMP",
+314        }
+315
+316        TRANSFORMS = {
+317            **generator.Generator.TRANSFORMS,  # type: ignore
+318            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+319            exp.ColumnDef: transforms.preprocess(
+320                [
+321                    _auto_increment_to_serial,
+322                    _serial_to_generated,
+323                ],
+324            ),
+325            exp.JSONExtract: arrow_json_extract_sql,
+326            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+327            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+328            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+329            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+330            exp.Pow: lambda self, e: self.binary(e, "^"),
+331            exp.CurrentDate: no_paren_current_date_sql,
+332            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+333            exp.DateAdd: _date_add_sql("+"),
+334            exp.DateStrToDate: datestrtodate_sql,
+335            exp.DateSub: _date_add_sql("-"),
+336            exp.DateDiff: _date_diff_sql,
+337            exp.LogicalOr: rename_func("BOOL_OR"),
+338            exp.LogicalAnd: rename_func("BOOL_AND"),
+339            exp.Max: max_or_greatest,
+340            exp.Min: min_or_least,
+341            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
+342            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
+343            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
+344            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
+345            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+346            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+347            exp.StrPosition: str_position_sql,
+348            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+349            exp.Substring: _substring_sql,
+350            exp.TimestampTrunc: timestamptrunc_sql,
+351            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+352            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+353            exp.TableSample: no_tablesample_sql,
+354            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+355            exp.Trim: trim_sql,
+356            exp.TryCast: no_trycast_sql,
+357            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+358            exp.DataType: _datatype_sql,
+359            exp.GroupConcat: _string_agg_sql,
+360            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+361            if isinstance(seq_get(e.expressions, 0), exp.Select)
+362            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+363        }
+364
+365        PROPERTIES_LOCATION = {
+366            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+367            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
+368            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+369        }
 
@@ -686,46 +685,46 @@
-213    class Tokenizer(tokens.Tokenizer):
-214        QUOTES = ["'", "$$"]
-215
-216        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
-217        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
-218        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
-219
-220        KEYWORDS = {
-221            **tokens.Tokenizer.KEYWORDS,
-222            "~~": TokenType.LIKE,
-223            "~~*": TokenType.ILIKE,
-224            "~*": TokenType.IRLIKE,
-225            "~": TokenType.RLIKE,
-226            "@>": TokenType.AT_GT,
-227            "<@": TokenType.LT_AT,
-228            "BEGIN": TokenType.COMMAND,
-229            "BEGIN TRANSACTION": TokenType.BEGIN,
-230            "BIGSERIAL": TokenType.BIGSERIAL,
-231            "CHARACTER VARYING": TokenType.VARCHAR,
-232            "DECLARE": TokenType.COMMAND,
-233            "DO": TokenType.COMMAND,
-234            "HSTORE": TokenType.HSTORE,
-235            "JSONB": TokenType.JSONB,
-236            "REFRESH": TokenType.COMMAND,
-237            "REINDEX": TokenType.COMMAND,
-238            "RESET": TokenType.COMMAND,
-239            "RETURNING": TokenType.RETURNING,
-240            "REVOKE": TokenType.COMMAND,
-241            "SERIAL": TokenType.SERIAL,
-242            "SMALLSERIAL": TokenType.SMALLSERIAL,
-243            "TEMP": TokenType.TEMPORARY,
-244            "CSTRING": TokenType.PSEUDO_TYPE,
-245        }
-246
-247        SINGLE_TOKENS = {
-248            **tokens.Tokenizer.SINGLE_TOKENS,
-249            "$": TokenType.PARAMETER,
-250        }
-251
-252        VAR_SINGLE_TOKENS = {"$"}
+            212    class Tokenizer(tokens.Tokenizer):
+213        QUOTES = ["'", "$$"]
+214
+215        BIT_STRINGS = [("b'", "'"), ("B'", "'")]
+216        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
+217        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
+218
+219        KEYWORDS = {
+220            **tokens.Tokenizer.KEYWORDS,
+221            "~~": TokenType.LIKE,
+222            "~~*": TokenType.ILIKE,
+223            "~*": TokenType.IRLIKE,
+224            "~": TokenType.RLIKE,
+225            "@>": TokenType.AT_GT,
+226            "<@": TokenType.LT_AT,
+227            "BEGIN": TokenType.COMMAND,
+228            "BEGIN TRANSACTION": TokenType.BEGIN,
+229            "BIGSERIAL": TokenType.BIGSERIAL,
+230            "CHARACTER VARYING": TokenType.VARCHAR,
+231            "DECLARE": TokenType.COMMAND,
+232            "DO": TokenType.COMMAND,
+233            "HSTORE": TokenType.HSTORE,
+234            "JSONB": TokenType.JSONB,
+235            "REFRESH": TokenType.COMMAND,
+236            "REINDEX": TokenType.COMMAND,
+237            "RESET": TokenType.COMMAND,
+238            "RETURNING": TokenType.RETURNING,
+239            "REVOKE": TokenType.COMMAND,
+240            "SERIAL": TokenType.SERIAL,
+241            "SMALLSERIAL": TokenType.SMALLSERIAL,
+242            "TEMP": TokenType.TEMPORARY,
+243            "CSTRING": TokenType.PSEUDO_TYPE,
+244        }
+245
+246        SINGLE_TOKENS = {
+247            **tokens.Tokenizer.SINGLE_TOKENS,
+248            "$": TokenType.PARAMETER,
+249        }
+250
+251        VAR_SINGLE_TOKENS = {"$"}
 
@@ -753,51 +752,51 @@
-254    class Parser(parser.Parser):
-255        STRICT_CAST = False
-256
-257        FUNCTIONS = {
-258            **parser.Parser.FUNCTIONS,  # type: ignore
-259            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
-260                this=seq_get(args, 1), unit=seq_get(args, 0)
-261            ),
-262            "GENERATE_SERIES": _generate_series,
-263            "NOW": exp.CurrentTimestamp.from_arg_list,
-264            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
-265            "TO_TIMESTAMP": _to_timestamp,
-266        }
-267
-268        FUNCTION_PARSERS = {
-269            **parser.Parser.FUNCTION_PARSERS,
-270            "DATE_PART": lambda self: self._parse_date_part(),
-271        }
-272
-273        BITWISE = {
-274            **parser.Parser.BITWISE,  # type: ignore
-275            TokenType.HASH: exp.BitwiseXor,
-276        }
-277
-278        FACTOR = {
-279            **parser.Parser.FACTOR,
-280            TokenType.CARET: exp.Pow,
-281        }
-282
-283        RANGE_PARSERS = {
-284            **parser.Parser.RANGE_PARSERS,  # type: ignore
-285            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
-286            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
-287            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
-288        }
-289
-290        def _parse_date_part(self) -> exp.Expression:
-291            part = self._parse_type()
-292            self._match(TokenType.COMMA)
-293            value = self._parse_bitwise()
-294
-295            if part and part.is_string:
-296                part = exp.Var(this=part.name)
-297
-298            return self.expression(exp.Extract, this=part, expression=value)
+            253    class Parser(parser.Parser):
+254        STRICT_CAST = False
+255
+256        FUNCTIONS = {
+257            **parser.Parser.FUNCTIONS,  # type: ignore
+258            "DATE_TRUNC": lambda args: exp.TimestampTrunc(
+259                this=seq_get(args, 1), unit=seq_get(args, 0)
+260            ),
+261            "GENERATE_SERIES": _generate_series,
+262            "NOW": exp.CurrentTimestamp.from_arg_list,
+263            "TO_CHAR": format_time_lambda(exp.TimeToStr, "postgres"),
+264            "TO_TIMESTAMP": _to_timestamp,
+265        }
+266
+267        FUNCTION_PARSERS = {
+268            **parser.Parser.FUNCTION_PARSERS,
+269            "DATE_PART": lambda self: self._parse_date_part(),
+270        }
+271
+272        BITWISE = {
+273            **parser.Parser.BITWISE,  # type: ignore
+274            TokenType.HASH: exp.BitwiseXor,
+275        }
+276
+277        FACTOR = {
+278            **parser.Parser.FACTOR,
+279            TokenType.CARET: exp.Pow,
+280        }
+281
+282        RANGE_PARSERS = {
+283            **parser.Parser.RANGE_PARSERS,  # type: ignore
+284            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
+285            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
+286            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
+287        }
+288
+289        def _parse_date_part(self) -> exp.Expression:
+290            part = self._parse_type()
+291            self._match(TokenType.COMMA)
+292            value = self._parse_bitwise()
+293
+294            if part and part.is_string:
+295                part = exp.Var(this=part.name)
+296
+297            return self.expression(exp.Extract, this=part, expression=value)
 
@@ -854,77 +853,77 @@ Default: "nulls_are_small"
-300    class Generator(generator.Generator):
-301        INTERVAL_ALLOWS_PLURAL_FORM = False
-302        LOCKING_READS_SUPPORTED = True
-303        JOIN_HINTS = False
-304        TABLE_HINTS = False
-305        PARAMETER_TOKEN = "$"
-306
-307        TYPE_MAPPING = {
-308            **generator.Generator.TYPE_MAPPING,  # type: ignore
-309            exp.DataType.Type.TINYINT: "SMALLINT",
-310            exp.DataType.Type.FLOAT: "REAL",
-311            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
-312            exp.DataType.Type.BINARY: "BYTEA",
-313            exp.DataType.Type.VARBINARY: "BYTEA",
-314            exp.DataType.Type.DATETIME: "TIMESTAMP",
-315        }
-316
-317        TRANSFORMS = {
-318            **generator.Generator.TRANSFORMS,  # type: ignore
-319            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
-320            exp.ColumnDef: preprocess(
-321                [
-322                    _auto_increment_to_serial,
-323                    _serial_to_generated,
-324                ],
-325            ),
-326            exp.JSONExtract: arrow_json_extract_sql,
-327            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
-328            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
-329            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
-330            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
-331            exp.Pow: lambda self, e: self.binary(e, "^"),
-332            exp.CurrentDate: no_paren_current_date_sql,
-333            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
-334            exp.DateAdd: _date_add_sql("+"),
-335            exp.DateStrToDate: datestrtodate_sql,
-336            exp.DateSub: _date_add_sql("-"),
-337            exp.DateDiff: _date_diff_sql,
-338            exp.LogicalOr: rename_func("BOOL_OR"),
-339            exp.LogicalAnd: rename_func("BOOL_AND"),
-340            exp.Max: max_or_greatest,
-341            exp.Min: min_or_least,
-342            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
-343            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
-344            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
-345            exp.Merge: preprocess([remove_target_from_merge]),
-346            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
-347            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
-348            exp.StrPosition: str_position_sql,
-349            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
-350            exp.Substring: _substring_sql,
-351            exp.TimestampTrunc: timestamptrunc_sql,
-352            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
-353            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
-354            exp.TableSample: no_tablesample_sql,
-355            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
-356            exp.Trim: trim_sql,
-357            exp.TryCast: no_trycast_sql,
-358            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
-359            exp.DataType: _datatype_sql,
-360            exp.GroupConcat: _string_agg_sql,
-361            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
-362            if isinstance(seq_get(e.expressions, 0), exp.Select)
-363            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
-364        }
-365
-366        PROPERTIES_LOCATION = {
-367            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
-368            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
-369            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
-370        }
+            299    class Generator(generator.Generator):
+300        INTERVAL_ALLOWS_PLURAL_FORM = False
+301        LOCKING_READS_SUPPORTED = True
+302        JOIN_HINTS = False
+303        TABLE_HINTS = False
+304        PARAMETER_TOKEN = "$"
+305
+306        TYPE_MAPPING = {
+307            **generator.Generator.TYPE_MAPPING,  # type: ignore
+308            exp.DataType.Type.TINYINT: "SMALLINT",
+309            exp.DataType.Type.FLOAT: "REAL",
+310            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
+311            exp.DataType.Type.BINARY: "BYTEA",
+312            exp.DataType.Type.VARBINARY: "BYTEA",
+313            exp.DataType.Type.DATETIME: "TIMESTAMP",
+314        }
+315
+316        TRANSFORMS = {
+317            **generator.Generator.TRANSFORMS,  # type: ignore
+318            exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
+319            exp.ColumnDef: transforms.preprocess(
+320                [
+321                    _auto_increment_to_serial,
+322                    _serial_to_generated,
+323                ],
+324            ),
+325            exp.JSONExtract: arrow_json_extract_sql,
+326            exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
+327            exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
+328            exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
+329            exp.JSONBContains: lambda self, e: self.binary(e, "?"),
+330            exp.Pow: lambda self, e: self.binary(e, "^"),
+331            exp.CurrentDate: no_paren_current_date_sql,
+332            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+333            exp.DateAdd: _date_add_sql("+"),
+334            exp.DateStrToDate: datestrtodate_sql,
+335            exp.DateSub: _date_add_sql("-"),
+336            exp.DateDiff: _date_diff_sql,
+337            exp.LogicalOr: rename_func("BOOL_OR"),
+338            exp.LogicalAnd: rename_func("BOOL_AND"),
+339            exp.Max: max_or_greatest,
+340            exp.Min: min_or_least,
+341            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
+342            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
+343            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
+344            exp.Merge: transforms.preprocess([transforms.remove_target_from_merge]),
+345            exp.RegexpLike: lambda self, e: self.binary(e, "~"),
+346            exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
+347            exp.StrPosition: str_position_sql,
+348            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
+349            exp.Substring: _substring_sql,
+350            exp.TimestampTrunc: timestamptrunc_sql,
+351            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+352            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
+353            exp.TableSample: no_tablesample_sql,
+354            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
+355            exp.Trim: trim_sql,
+356            exp.TryCast: no_trycast_sql,
+357            exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
+358            exp.DataType: _datatype_sql,
+359            exp.GroupConcat: _string_agg_sql,
+360            exp.Array: lambda self, e: f"{self.normalize_func('ARRAY')}({self.sql(e.expressions[0])})"
+361            if isinstance(seq_get(e.expressions, 0), exp.Select)
+362            else f"{self.normalize_func('ARRAY')}[{self.expressions(e, flat=True)}]",
+363        }
+364
+365        PROPERTIES_LOCATION = {
+366            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
+367            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
+368            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
+369        }
 
@@ -1096,7 +1095,7 @@ Default: True
where_sql
window_sql
partition_by_sql
-window_spec_sql
+windowspec_sql
withingroup_sql
between_sql
bracket_sql
@@ -1105,6 +1104,7 @@ Default: True
exists_sql
case_sql
constraint_sql
+nextvaluefor_sql
extract_sql
trim_sql
concat_sql
-- cgit v1.2.3
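
For readers skimming the diff above: the file being updated is the rendered documentation of sqlglot's Postgres dialect (sqlglot/dialects/postgres.py). A minimal usage sketch follows; it is not part of the patch, assumes a 12.x release of sqlglot is installed, and the exact SQL text produced may differ between versions.

# Minimal sketch (illustrative only): round-trip a Postgres-specific expression
# through the dialect shown in the diff above.
import sqlglot

# "@>" (array contains) is tokenized and parsed by the Tokenizer/Parser classes
# in the diff; transpile() parses with the "read" dialect and regenerates SQL
# with the "write" dialect's Generator.
sql = "SELECT ARRAY[1, 2, 3] @> ARRAY[2]"
print(sqlglot.transpile(sql, read="postgres", write="postgres")[0])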