From 7457677bc603569692329e39a59ccb018306e2a6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 12 Mar 2023 11:17:16 +0100
Subject: Merging upstream version 11.3.6.

Signed-off-by: Daniel Baumann
---
 docs/sqlglot/dialects/redshift.html | 740 ++++++++++++++++++------------------
 1 file changed, 373 insertions(+), 367 deletions(-)

(limited to 'docs/sqlglot/dialects/redshift.html')

diff --git a/docs/sqlglot/dialects/redshift.html b/docs/sqlglot/dialects/redshift.html
index 790d909..7e69e65 100644
--- a/docs/sqlglot/dialects/redshift.html
+++ b/docs/sqlglot/dialects/redshift.html
@@ -120,113 +120,115 @@
 29            "NVL": exp.Coalesce.from_arg_list,
 30        }
 31
- 32        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
- 33            this = super()._parse_types(check_func=check_func)
- 34
- 35            if (
- 36                isinstance(this, exp.DataType)
- 37                and this.this == exp.DataType.Type.VARCHAR
- 38                and this.expressions
- 39                and this.expressions[0] == exp.column("MAX")
- 40            ):
- 41                this.set("expressions", [exp.Var(this="MAX")])
- 42
- 43            return this
+ 32        CONVERT_TYPE_FIRST = True
+ 33
+ 34        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+ 35            this = super()._parse_types(check_func=check_func)
+ 36
+ 37            if (
+ 38                isinstance(this, exp.DataType)
+ 39                and this.this == exp.DataType.Type.VARCHAR
+ 40                and this.expressions
+ 41                and this.expressions[0] == exp.column("MAX")
+ 42            ):
+ 43                this.set("expressions", [exp.Var(this="MAX")])
 44
- 45    class Tokenizer(Postgres.Tokenizer):
- 46        STRING_ESCAPES = ["\\"]
- 47
- 48        KEYWORDS = {
- 49            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
- 50            "GEOMETRY": TokenType.GEOMETRY,
- 51            "GEOGRAPHY": TokenType.GEOGRAPHY,
- 52            "HLLSKETCH": TokenType.HLLSKETCH,
- 53            "SUPER": TokenType.SUPER,
- 54            "TIME": TokenType.TIMESTAMP,
- 55            "TIMETZ": TokenType.TIMESTAMPTZ,
- 56            "TOP": TokenType.TOP,
- 57            "UNLOAD": TokenType.COMMAND,
- 58            "VARBYTE": TokenType.VARBINARY,
- 59        }
- 60
- 61    class Generator(Postgres.Generator):
- 62        TYPE_MAPPING = {
- 63            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
- 64            exp.DataType.Type.BINARY: "VARBYTE",
- 65            exp.DataType.Type.VARBINARY: "VARBYTE",
- 66            exp.DataType.Type.INT: "INTEGER",
- 67        }
- 68
- 69        PROPERTIES_LOCATION = {
- 70            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
- 71            exp.LikeProperty: exp.Properties.Location.POST_WITH,
- 72        }
- 73
- 74        TRANSFORMS = {
- 75            **Postgres.Generator.TRANSFORMS,  # type: ignore
- 76            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
- 77            exp.DateDiff: lambda self, e: self.func(
- 78                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
- 79            ),
- 80            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
- 81            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
- 82            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
- 83            exp.Matches: rename_func("DECODE"),
- 84        }
- 85
- 86        def values_sql(self, expression: exp.Values) -> str:
- 87            """
- 88            Converts `VALUES...` expression into a series of unions.
- 89
- 90            Note: If you have a lot of unions then this will result in a large number of recursive statements to
- 91            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
- 92            very slow.
- 93            """
- 94            if not isinstance(expression.unnest().parent, exp.From):
- 95                return super().values_sql(expression)
- 96            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
- 97            selects = []
- 98            for i, row in enumerate(rows):
- 99                if i == 0 and expression.alias:
-100                    row = [
-101                        exp.alias_(value, column_name)
-102                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
-103                    ]
-104                selects.append(exp.Select(expressions=row))
-105            subquery_expression = selects[0]
-106            if len(selects) > 1:
-107                for select in selects[1:]:
-108                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
-109            return self.subquery_sql(subquery_expression.subquery(expression.alias))
-110
-111        def with_properties(self, properties: exp.Properties) -> str:
-112            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
-113            return self.properties(properties, prefix=" ", suffix="")
-114
-115        def renametable_sql(self, expression: exp.RenameTable) -> str:
-116            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
-117            expression = expression.copy()
-118            target_table = expression.this
-119            for arg in target_table.args:
-120                if arg != "this":
-121                    target_table.set(arg, None)
-122            this = self.sql(expression, "this")
-123            return f"RENAME TO {this}"
-124
-125        def datatype_sql(self, expression: exp.DataType) -> str:
-126            """
-127            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
-128            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
-129            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
-130            `TEXT` to `VARCHAR`.
-131            """
-132            if expression.this == exp.DataType.Type.TEXT:
-133                expression = expression.copy()
-134                expression.set("this", exp.DataType.Type.VARCHAR)
-135                precision = expression.args.get("expressions")
-136                if not precision:
-137                    expression.append("expressions", exp.Var(this="MAX"))
-138            return super().datatype_sql(expression)
+ 45            return this
+ 46
+ 47    class Tokenizer(Postgres.Tokenizer):
+ 48        STRING_ESCAPES = ["\\"]
+ 49
+ 50        KEYWORDS = {
+ 51            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+ 52            "GEOMETRY": TokenType.GEOMETRY,
+ 53            "GEOGRAPHY": TokenType.GEOGRAPHY,
+ 54            "HLLSKETCH": TokenType.HLLSKETCH,
+ 55            "SUPER": TokenType.SUPER,
+ 56            "TIME": TokenType.TIMESTAMP,
+ 57            "TIMETZ": TokenType.TIMESTAMPTZ,
+ 58            "TOP": TokenType.TOP,
+ 59            "UNLOAD": TokenType.COMMAND,
+ 60            "VARBYTE": TokenType.VARBINARY,
+ 61        }
+ 62
+ 63    class Generator(Postgres.Generator):
+ 64        TYPE_MAPPING = {
+ 65            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 66            exp.DataType.Type.BINARY: "VARBYTE",
+ 67            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 68            exp.DataType.Type.INT: "INTEGER",
+ 69        }
+ 70
+ 71        PROPERTIES_LOCATION = {
+ 72            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 73            exp.LikeProperty: exp.Properties.Location.POST_WITH,
+ 74        }
+ 75
+ 76        TRANSFORMS = {
+ 77            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 78            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 79            exp.DateDiff: lambda self, e: self.func(
+ 80                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
+ 81            ),
+ 82            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 83            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 84            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 85            exp.Matches: rename_func("DECODE"),
+ 86        }
+ 87
+ 88        def values_sql(self, expression: exp.Values) -> str:
+ 89            """
+ 90            Converts `VALUES...` expression into a series of unions.
+ 91
+ 92            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 93            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 94            very slow.
+ 95            """
+ 96            if not isinstance(expression.unnest().parent, exp.From):
+ 97                return super().values_sql(expression)
+ 98            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+ 99            selects = []
+100            for i, row in enumerate(rows):
+101                if i == 0 and expression.alias:
+102                    row = [
+103                        exp.alias_(value, column_name)
+104                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+105                    ]
+106                selects.append(exp.Select(expressions=row))
+107            subquery_expression = selects[0]
+108            if len(selects) > 1:
+109                for select in selects[1:]:
+110                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+111            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+112
+113        def with_properties(self, properties: exp.Properties) -> str:
+114            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+115            return self.properties(properties, prefix=" ", suffix="")
+116
+117        def renametable_sql(self, expression: exp.RenameTable) -> str:
+118            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+119            expression = expression.copy()
+120            target_table = expression.this
+121            for arg in target_table.args:
+122                if arg != "this":
+123                    target_table.set(arg, None)
+124            this = self.sql(expression, "this")
+125            return f"RENAME TO {this}"
+126
+127        def datatype_sql(self, expression: exp.DataType) -> str:
+128            """
+129            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+130            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+131            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+132            `TEXT` to `VARCHAR`.
+133            """
+134            if expression.this == exp.DataType.Type.TEXT:
+135                expression = expression.copy()
+136                expression.set("this", exp.DataType.Type.VARCHAR)
+137                precision = expression.args.get("expressions")
+138                if not precision:
+139                    expression.append("expressions", exp.Var(this="MAX"))
+140            return super().datatype_sql(expression)
@@ -262,113 +264,115 @@
 30            "NVL": exp.Coalesce.from_arg_list,
 31        }
 32
- 33        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
- 34            this = super()._parse_types(check_func=check_func)
- 35
- 36            if (
- 37                isinstance(this, exp.DataType)
- 38                and this.this == exp.DataType.Type.VARCHAR
- 39                and this.expressions
- 40                and this.expressions[0] == exp.column("MAX")
- 41            ):
- 42                this.set("expressions", [exp.Var(this="MAX")])
- 43
- 44            return this
+ 33        CONVERT_TYPE_FIRST = True
+ 34
+ 35        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+ 36            this = super()._parse_types(check_func=check_func)
+ 37
+ 38            if (
+ 39                isinstance(this, exp.DataType)
+ 40                and this.this == exp.DataType.Type.VARCHAR
+ 41                and this.expressions
+ 42                and this.expressions[0] == exp.column("MAX")
+ 43            ):
+ 44                this.set("expressions", [exp.Var(this="MAX")])
 45
- 46    class Tokenizer(Postgres.Tokenizer):
- 47        STRING_ESCAPES = ["\\"]
- 48
- 49        KEYWORDS = {
- 50            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
- 51            "GEOMETRY": TokenType.GEOMETRY,
- 52            "GEOGRAPHY": TokenType.GEOGRAPHY,
- 53            "HLLSKETCH": TokenType.HLLSKETCH,
- 54            "SUPER": TokenType.SUPER,
- 55            "TIME": TokenType.TIMESTAMP,
- 56            "TIMETZ": TokenType.TIMESTAMPTZ,
- 57            "TOP": TokenType.TOP,
- 58            "UNLOAD": TokenType.COMMAND,
- 59            "VARBYTE": TokenType.VARBINARY,
- 60        }
- 61
- 62    class Generator(Postgres.Generator):
- 63        TYPE_MAPPING = {
- 64            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
- 65            exp.DataType.Type.BINARY: "VARBYTE",
- 66            exp.DataType.Type.VARBINARY: "VARBYTE",
- 67            exp.DataType.Type.INT: "INTEGER",
- 68        }
- 69
- 70        PROPERTIES_LOCATION = {
- 71            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
- 72            exp.LikeProperty: exp.Properties.Location.POST_WITH,
- 73        }
- 74
- 75        TRANSFORMS = {
- 76            **Postgres.Generator.TRANSFORMS,  # type: ignore
- 77            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
- 78            exp.DateDiff: lambda self, e: self.func(
- 79                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
- 80            ),
- 81            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
- 82            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
- 83            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
- 84            exp.Matches: rename_func("DECODE"),
- 85        }
- 86
- 87        def values_sql(self, expression: exp.Values) -> str:
- 88            """
- 89            Converts `VALUES...` expression into a series of unions.
- 90
- 91            Note: If you have a lot of unions then this will result in a large number of recursive statements to
- 92            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
- 93            very slow.
- 94            """
- 95            if not isinstance(expression.unnest().parent, exp.From):
- 96                return super().values_sql(expression)
- 97            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
- 98            selects = []
- 99            for i, row in enumerate(rows):
-100                if i == 0 and expression.alias:
-101                    row = [
-102                        exp.alias_(value, column_name)
-103                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
-104                    ]
-105                selects.append(exp.Select(expressions=row))
-106            subquery_expression = selects[0]
-107            if len(selects) > 1:
-108                for select in selects[1:]:
-109                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
-110            return self.subquery_sql(subquery_expression.subquery(expression.alias))
-111
-112        def with_properties(self, properties: exp.Properties) -> str:
-113            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
-114            return self.properties(properties, prefix=" ", suffix="")
-115
-116        def renametable_sql(self, expression: exp.RenameTable) -> str:
-117            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
-118            expression = expression.copy()
-119            target_table = expression.this
-120            for arg in target_table.args:
-121                if arg != "this":
-122                    target_table.set(arg, None)
-123            this = self.sql(expression, "this")
-124            return f"RENAME TO {this}"
-125
-126        def datatype_sql(self, expression: exp.DataType) -> str:
-127            """
-128            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
-129            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
-130            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
-131            `TEXT` to `VARCHAR`.
-132            """
-133            if expression.this == exp.DataType.Type.TEXT:
-134                expression = expression.copy()
-135                expression.set("this", exp.DataType.Type.VARCHAR)
-136                precision = expression.args.get("expressions")
-137                if not precision:
-138                    expression.append("expressions", exp.Var(this="MAX"))
-139            return super().datatype_sql(expression)
+ 46            return this
+ 47
+ 48    class Tokenizer(Postgres.Tokenizer):
+ 49        STRING_ESCAPES = ["\\"]
+ 50
+ 51        KEYWORDS = {
+ 52            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+ 53            "GEOMETRY": TokenType.GEOMETRY,
+ 54            "GEOGRAPHY": TokenType.GEOGRAPHY,
+ 55            "HLLSKETCH": TokenType.HLLSKETCH,
+ 56            "SUPER": TokenType.SUPER,
+ 57            "TIME": TokenType.TIMESTAMP,
+ 58            "TIMETZ": TokenType.TIMESTAMPTZ,
+ 59            "TOP": TokenType.TOP,
+ 60            "UNLOAD": TokenType.COMMAND,
+ 61            "VARBYTE": TokenType.VARBINARY,
+ 62        }
+ 63
+ 64    class Generator(Postgres.Generator):
+ 65        TYPE_MAPPING = {
+ 66            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 67            exp.DataType.Type.BINARY: "VARBYTE",
+ 68            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 69            exp.DataType.Type.INT: "INTEGER",
+ 70        }
+ 71
+ 72        PROPERTIES_LOCATION = {
+ 73            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 74            exp.LikeProperty: exp.Properties.Location.POST_WITH,
+ 75        }
+ 76
+ 77        TRANSFORMS = {
+ 78            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 79            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 80            exp.DateDiff: lambda self, e: self.func(
+ 81                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
+ 82            ),
+ 83            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 84            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 85            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 86            exp.Matches: rename_func("DECODE"),
+ 87        }
+ 88
+ 89        def values_sql(self, expression: exp.Values) -> str:
+ 90            """
+ 91            Converts `VALUES...` expression into a series of unions.
+ 92
+ 93            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 94            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 95            very slow.
+ 96            """
+ 97            if not isinstance(expression.unnest().parent, exp.From):
+ 98                return super().values_sql(expression)
+ 99            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+100            selects = []
+101            for i, row in enumerate(rows):
+102                if i == 0 and expression.alias:
+103                    row = [
+104                        exp.alias_(value, column_name)
+105                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+106                    ]
+107                selects.append(exp.Select(expressions=row))
+108            subquery_expression = selects[0]
+109            if len(selects) > 1:
+110                for select in selects[1:]:
+111                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+112            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+113
+114        def with_properties(self, properties: exp.Properties) -> str:
+115            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+116            return self.properties(properties, prefix=" ", suffix="")
+117
+118        def renametable_sql(self, expression: exp.RenameTable) -> str:
+119            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+120            expression = expression.copy()
+121            target_table = expression.this
+122            for arg in target_table.args:
+123                if arg != "this":
+124                    target_table.set(arg, None)
+125            this = self.sql(expression, "this")
+126            return f"RENAME TO {this}"
+127
+128        def datatype_sql(self, expression: exp.DataType) -> str:
+129            """
+130            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+131            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+132            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+133            `TEXT` to `VARCHAR`.
+134            """
+135            if expression.this == exp.DataType.Type.TEXT:
+136                expression = expression.copy()
+137                expression.set("this", exp.DataType.Type.VARCHAR)
+138                precision = expression.args.get("expressions")
+139                if not precision:
+140                    expression.append("expressions", exp.Var(this="MAX"))
+141            return super().datatype_sql(expression)
@@ -414,18 +418,20 @@
30            "NVL": exp.Coalesce.from_arg_list,
31        }
32
-33        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
-34            this = super()._parse_types(check_func=check_func)
-35
-36            if (
-37                isinstance(this, exp.DataType)
-38                and this.this == exp.DataType.Type.VARCHAR
-39                and this.expressions
-40                and this.expressions[0] == exp.column("MAX")
-41            ):
-42                this.set("expressions", [exp.Var(this="MAX")])
-43
-44            return this
+33        CONVERT_TYPE_FIRST = True
+34
+35        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
+36            this = super()._parse_types(check_func=check_func)
+37
+38            if (
+39                isinstance(this, exp.DataType)
+40                and this.this == exp.DataType.Type.VARCHAR
+41                and this.expressions
+42                and this.expressions[0] == exp.column("MAX")
+43            ):
+44                this.set("expressions", [exp.Var(this="MAX")])
+45
+46            return this
@@ -482,21 +488,21 @@ Default: "nulls_are_small"
-
46    class Tokenizer(Postgres.Tokenizer):
-47        STRING_ESCAPES = ["\\"]
-48
-49        KEYWORDS = {
-50            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
-51            "GEOMETRY": TokenType.GEOMETRY,
-52            "GEOGRAPHY": TokenType.GEOGRAPHY,
-53            "HLLSKETCH": TokenType.HLLSKETCH,
-54            "SUPER": TokenType.SUPER,
-55            "TIME": TokenType.TIMESTAMP,
-56            "TIMETZ": TokenType.TIMESTAMPTZ,
-57            "TOP": TokenType.TOP,
-58            "UNLOAD": TokenType.COMMAND,
-59            "VARBYTE": TokenType.VARBINARY,
-60        }
+            
48    class Tokenizer(Postgres.Tokenizer):
+49        STRING_ESCAPES = ["\\"]
+50
+51        KEYWORDS = {
+52            **Postgres.Tokenizer.KEYWORDS,  # type: ignore
+53            "GEOMETRY": TokenType.GEOMETRY,
+54            "GEOGRAPHY": TokenType.GEOGRAPHY,
+55            "HLLSKETCH": TokenType.HLLSKETCH,
+56            "SUPER": TokenType.SUPER,
+57            "TIME": TokenType.TIMESTAMP,
+58            "TIMETZ": TokenType.TIMESTAMPTZ,
+59            "TOP": TokenType.TOP,
+60            "UNLOAD": TokenType.COMMAND,
+61            "VARBYTE": TokenType.VARBINARY,
+62        }
 
@@ -524,84 +530,84 @@ Default: "nulls_are_small"
-
 62    class Generator(Postgres.Generator):
- 63        TYPE_MAPPING = {
- 64            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
- 65            exp.DataType.Type.BINARY: "VARBYTE",
- 66            exp.DataType.Type.VARBINARY: "VARBYTE",
- 67            exp.DataType.Type.INT: "INTEGER",
- 68        }
- 69
- 70        PROPERTIES_LOCATION = {
- 71            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
- 72            exp.LikeProperty: exp.Properties.Location.POST_WITH,
- 73        }
- 74
- 75        TRANSFORMS = {
- 76            **Postgres.Generator.TRANSFORMS,  # type: ignore
- 77            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
- 78            exp.DateDiff: lambda self, e: self.func(
- 79                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
- 80            ),
- 81            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
- 82            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
- 83            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
- 84            exp.Matches: rename_func("DECODE"),
- 85        }
- 86
- 87        def values_sql(self, expression: exp.Values) -> str:
- 88            """
- 89            Converts `VALUES...` expression into a series of unions.
- 90
- 91            Note: If you have a lot of unions then this will result in a large number of recursive statements to
- 92            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
- 93            very slow.
- 94            """
- 95            if not isinstance(expression.unnest().parent, exp.From):
- 96                return super().values_sql(expression)
- 97            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
- 98            selects = []
- 99            for i, row in enumerate(rows):
-100                if i == 0 and expression.alias:
-101                    row = [
-102                        exp.alias_(value, column_name)
-103                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
-104                    ]
-105                selects.append(exp.Select(expressions=row))
-106            subquery_expression = selects[0]
-107            if len(selects) > 1:
-108                for select in selects[1:]:
-109                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
-110            return self.subquery_sql(subquery_expression.subquery(expression.alias))
-111
-112        def with_properties(self, properties: exp.Properties) -> str:
-113            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
-114            return self.properties(properties, prefix=" ", suffix="")
-115
-116        def renametable_sql(self, expression: exp.RenameTable) -> str:
-117            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
-118            expression = expression.copy()
-119            target_table = expression.this
-120            for arg in target_table.args:
-121                if arg != "this":
-122                    target_table.set(arg, None)
-123            this = self.sql(expression, "this")
-124            return f"RENAME TO {this}"
-125
-126        def datatype_sql(self, expression: exp.DataType) -> str:
-127            """
-128            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
-129            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
-130            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
-131            `TEXT` to `VARCHAR`.
-132            """
-133            if expression.this == exp.DataType.Type.TEXT:
-134                expression = expression.copy()
-135                expression.set("this", exp.DataType.Type.VARCHAR)
-136                precision = expression.args.get("expressions")
-137                if not precision:
-138                    expression.append("expressions", exp.Var(this="MAX"))
-139            return super().datatype_sql(expression)
+            
 64    class Generator(Postgres.Generator):
+ 65        TYPE_MAPPING = {
+ 66            **Postgres.Generator.TYPE_MAPPING,  # type: ignore
+ 67            exp.DataType.Type.BINARY: "VARBYTE",
+ 68            exp.DataType.Type.VARBINARY: "VARBYTE",
+ 69            exp.DataType.Type.INT: "INTEGER",
+ 70        }
+ 71
+ 72        PROPERTIES_LOCATION = {
+ 73            **Postgres.Generator.PROPERTIES_LOCATION,  # type: ignore
+ 74            exp.LikeProperty: exp.Properties.Location.POST_WITH,
+ 75        }
+ 76
+ 77        TRANSFORMS = {
+ 78            **Postgres.Generator.TRANSFORMS,  # type: ignore
+ 79            **transforms.ELIMINATE_DISTINCT_ON,  # type: ignore
+ 80            exp.DateDiff: lambda self, e: self.func(
+ 81                "DATEDIFF", e.args.get("unit") or "day", e.expression, e.this
+ 82            ),
+ 83            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+ 84            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+ 85            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+ 86            exp.Matches: rename_func("DECODE"),
+ 87        }
+ 88
+ 89        def values_sql(self, expression: exp.Values) -> str:
+ 90            """
+ 91            Converts `VALUES...` expression into a series of unions.
+ 92
+ 93            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 94            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 95            very slow.
+ 96            """
+ 97            if not isinstance(expression.unnest().parent, exp.From):
+ 98                return super().values_sql(expression)
+ 99            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+100            selects = []
+101            for i, row in enumerate(rows):
+102                if i == 0 and expression.alias:
+103                    row = [
+104                        exp.alias_(value, column_name)
+105                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+106                    ]
+107                selects.append(exp.Select(expressions=row))
+108            subquery_expression = selects[0]
+109            if len(selects) > 1:
+110                for select in selects[1:]:
+111                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+112            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+113
+114        def with_properties(self, properties: exp.Properties) -> str:
+115            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+116            return self.properties(properties, prefix=" ", suffix="")
+117
+118        def renametable_sql(self, expression: exp.RenameTable) -> str:
+119            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+120            expression = expression.copy()
+121            target_table = expression.this
+122            for arg in target_table.args:
+123                if arg != "this":
+124                    target_table.set(arg, None)
+125            this = self.sql(expression, "this")
+126            return f"RENAME TO {this}"
+127
+128        def datatype_sql(self, expression: exp.DataType) -> str:
+129            """
+130            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+131            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+132            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+133            `TEXT` to `VARCHAR`.
+134            """
+135            if expression.this == exp.DataType.Type.TEXT:
+136                expression = expression.copy()
+137                expression.set("this", exp.DataType.Type.VARCHAR)
+138                precision = expression.args.get("expressions")
+139                if not precision:
+140                    expression.append("expressions", exp.Var(this="MAX"))
+141            return super().datatype_sql(expression)
 
@@ -661,30 +667,30 @@ Default: True
-
 87        def values_sql(self, expression: exp.Values) -> str:
- 88            """
- 89            Converts `VALUES...` expression into a series of unions.
- 90
- 91            Note: If you have a lot of unions then this will result in a large number of recursive statements to
- 92            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
- 93            very slow.
- 94            """
- 95            if not isinstance(expression.unnest().parent, exp.From):
- 96                return super().values_sql(expression)
- 97            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
- 98            selects = []
- 99            for i, row in enumerate(rows):
-100                if i == 0 and expression.alias:
-101                    row = [
-102                        exp.alias_(value, column_name)
-103                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
-104                    ]
-105                selects.append(exp.Select(expressions=row))
-106            subquery_expression = selects[0]
-107            if len(selects) > 1:
-108                for select in selects[1:]:
-109                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
-110            return self.subquery_sql(subquery_expression.subquery(expression.alias))
+            
 89        def values_sql(self, expression: exp.Values) -> str:
+ 90            """
+ 91            Converts `VALUES...` expression into a series of unions.
+ 92
+ 93            Note: If you have a lot of unions then this will result in a large number of recursive statements to
+ 94            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
+ 95            very slow.
+ 96            """
+ 97            if not isinstance(expression.unnest().parent, exp.From):
+ 98                return super().values_sql(expression)
+ 99            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]
+100            selects = []
+101            for i, row in enumerate(rows):
+102                if i == 0 and expression.alias:
+103                    row = [
+104                        exp.alias_(value, column_name)
+105                        for value, column_name in zip(row, expression.args["alias"].args["columns"])
+106                    ]
+107                selects.append(exp.Select(expressions=row))
+108            subquery_expression = selects[0]
+109            if len(selects) > 1:
+110                for select in selects[1:]:
+111                    subquery_expression = exp.union(subquery_expression, select, distinct=False)
+112            return self.subquery_sql(subquery_expression.subquery(expression.alias))
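
A minimal sketch of what this VALUES-to-UNION rewrite produces, assuming sqlglot 11.x and its top-level transpile() helper; the exact formatting of the generated SQL is illustrative rather than guaranteed:

    import sqlglot

    # A VALUES list used as a FROM source; Redshift does not accept this form,
    # so the Redshift generator rewrites it into a chain of UNION ALL selects,
    # attaching the column aliases to the first SELECT only.
    sql = "SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS t (a, b)"
    print(sqlglot.transpile(sql, read="postgres", write="redshift")[0])
    # Roughly: SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS t
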
 
@@ -708,9 +714,9 @@ very slow.

-
112        def with_properties(self, properties: exp.Properties) -> str:
-113            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
-114            return self.properties(properties, prefix=" ", suffix="")
+            
114        def with_properties(self, properties: exp.Properties) -> str:
+115            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+116            return self.properties(properties, prefix=" ", suffix="")
 
@@ -730,15 +736,15 @@ very slow.

-
116        def renametable_sql(self, expression: exp.RenameTable) -> str:
-117            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
-118            expression = expression.copy()
-119            target_table = expression.this
-120            for arg in target_table.args:
-121                if arg != "this":
-122                    target_table.set(arg, None)
-123            this = self.sql(expression, "this")
-124            return f"RENAME TO {this}"
+            
118        def renametable_sql(self, expression: exp.RenameTable) -> str:
+119            """Redshift only supports defining the table name itself (not the db) when renaming tables"""
+120            expression = expression.copy()
+121            target_table = expression.this
+122            for arg in target_table.args:
+123                if arg != "this":
+124                    target_table.set(arg, None)
+125            this = self.sql(expression, "this")
+126            return f"RENAME TO {this}"
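
A rough illustration of the schema-stripping described in the docstring above, assuming sqlglot 11.x parses ALTER TABLE ... RENAME TO into the exp.RenameTable node this generator expects:

    import sqlglot

    # Redshift requires a bare table name after RENAME TO, so any db/schema
    # qualifier is dropped from the rename target while the source keeps it.
    sql = "ALTER TABLE db.t1 RENAME TO db.t2"
    print(sqlglot.transpile(sql, read="postgres", write="redshift")[0])
    # Roughly: ALTER TABLE db.t1 RENAME TO t2
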
 
@@ -758,20 +764,20 @@ very slow.

-
126        def datatype_sql(self, expression: exp.DataType) -> str:
-127            """
-128            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
-129            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
-130            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
-131            `TEXT` to `VARCHAR`.
-132            """
-133            if expression.this == exp.DataType.Type.TEXT:
-134                expression = expression.copy()
-135                expression.set("this", exp.DataType.Type.VARCHAR)
-136                precision = expression.args.get("expressions")
-137                if not precision:
-138                    expression.append("expressions", exp.Var(this="MAX"))
-139            return super().datatype_sql(expression)
+            
128        def datatype_sql(self, expression: exp.DataType) -> str:
+129            """
+130            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+131            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+132            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+133            `TEXT` to `VARCHAR`.
+134            """
+135            if expression.this == exp.DataType.Type.TEXT:
+136                expression = expression.copy()
+137                expression.set("this", exp.DataType.Type.VARCHAR)
+138                precision = expression.args.get("expressions")
+139                if not precision:
+140                    expression.append("expressions", exp.Var(this="MAX"))
+141            return super().datatype_sql(expression)
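
A short sketch of the TEXT handling described above, again assuming sqlglot 11.x and transpile(); the output shown is the expected shape, not a guaranteed literal:

    import sqlglot

    # TEXT with no length is widened to VARCHAR(MAX); a TEXT that already
    # carries a length would simply be rewritten as VARCHAR(<n>).
    print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])
    # Roughly: CREATE TABLE t (c VARCHAR(MAX))
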
 
-- cgit v1.2.3