Diffstat (limited to 'sqlglot/dialects')
-rw-r--r--  sqlglot/dialects/bigquery.py    |  2
-rw-r--r--  sqlglot/dialects/clickhouse.py  | 56
-rw-r--r--  sqlglot/dialects/dialect.py     | 27
-rw-r--r--  sqlglot/dialects/duckdb.py      |  2
-rw-r--r--  sqlglot/dialects/hive.py        | 41
-rw-r--r--  sqlglot/dialects/mysql.py       |  3
-rw-r--r--  sqlglot/dialects/postgres.py    |  7
-rw-r--r--  sqlglot/dialects/presto.py      | 11
-rw-r--r--  sqlglot/dialects/redshift.py    | 20
-rw-r--r--  sqlglot/dialects/snowflake.py   |  4
-rw-r--r--  sqlglot/dialects/spark2.py      | 23
-rw-r--r--  sqlglot/dialects/sqlite.py      |  2
-rw-r--r--  sqlglot/dialects/teradata.py    | 17
13 files changed, 172 insertions, 43 deletions
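
Most of the patch is mechanical (comparisons against exp.DataType.Type.X replaced by is_type(...)), plus new shared left_to_substring_sql / right_to_substring_sql helpers in dialect.py and ClickHouse ON CLUSTER support. A hedged sketch of how the new LEFT/RIGHT handling might be exercised through the public sqlglot.transpile API — the inputs and expected outputs below are illustrative and not taken from the patch or its tests:

    import sqlglot

    # LEFT/RIGHT parse into exp.Left / exp.Right and, with this change, are rewritten
    # to SUBSTRING for dialects that lack LEFT/RIGHT (Hive, Presto).
    print(sqlglot.transpile("SELECT LEFT(x, 3)", read="mysql", write="hive"))
    # expected shape: ['SELECT SUBSTRING(x, 1, 3)']

    print(sqlglot.transpile("SELECT RIGHT(x, 2)", read="mysql", write="presto"))
    # expected shape: ['SELECT SUBSTRING(x, LENGTH(x) - (2 - 1))']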
diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 1a58337..5b10852 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -327,6 +327,8 @@ class BigQuery(Dialect):
             exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
         }
 
+        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}
+
         def array_sql(self, expression: exp.Array) -> str:
             first_arg = seq_get(expression.expressions, 0)
             if isinstance(first_arg, exp.Subqueryable):
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index c8a9525..fc48379 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -27,14 +27,15 @@ class ClickHouse(Dialect):
     class Tokenizer(tokens.Tokenizer):
         COMMENTS = ["--", "#", "#!", ("/*", "*/")]
         IDENTIFIERS = ['"', "`"]
+        STRING_ESCAPES = ["'", "\\"]
         BIT_STRINGS = [("0b", "")]
         HEX_STRINGS = [("0x", ""), ("0X", "")]
 
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
-            "ASOF": TokenType.ASOF,
             "ATTACH": TokenType.COMMAND,
             "DATETIME64": TokenType.DATETIME64,
+            "DICTIONARY": TokenType.DICTIONARY,
             "FINAL": TokenType.FINAL,
             "FLOAT32": TokenType.FLOAT,
             "FLOAT64": TokenType.DOUBLE,
@@ -97,7 +98,6 @@ class ClickHouse(Dialect):
 
         TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {
             TokenType.ANY,
-            TokenType.ASOF,
             TokenType.SEMI,
             TokenType.ANTI,
             TokenType.SETTINGS,
@@ -182,7 +182,7 @@ class ClickHouse(Dialect):
 
             return self.expression(exp.CTE, this=statement, alias=statement and statement.this)
 
-        def _parse_join_side_and_kind(
+        def _parse_join_parts(
             self,
         ) -> t.Tuple[t.Optional[Token], t.Optional[Token], t.Optional[Token]]:
             is_global = self._match(TokenType.GLOBAL) and self._prev
@@ -201,7 +201,7 @@ class ClickHouse(Dialect):
             join = super()._parse_join(skip_join_token)
 
             if join:
-                join.set("global", join.args.pop("natural", None))
+                join.set("global", join.args.pop("method", None))
             return join
 
         def _parse_function(
@@ -245,6 +245,23 @@ class ClickHouse(Dialect):
         ) -> t.List[t.Optional[exp.Expression]]:
             return super()._parse_wrapped_id_vars(optional=True)
 
+        def _parse_primary_key(
+            self, wrapped_optional: bool = False, in_props: bool = False
+        ) -> exp.Expression:
+            return super()._parse_primary_key(
+                wrapped_optional=wrapped_optional or in_props, in_props=in_props
+            )
+
+        def _parse_on_property(self) -> t.Optional[exp.Property]:
+            index = self._index
+            if self._match_text_seq("CLUSTER"):
+                this = self._parse_id_var()
+                if this:
+                    return self.expression(exp.OnCluster, this=this)
+                else:
+                    self._retreat(index)
+            return None
+
     class Generator(generator.Generator):
         STRUCT_DELIMITER = ("(", ")")
 
@@ -292,6 +309,7 @@ class ClickHouse(Dialect):
             **generator.Generator.PROPERTIES_LOCATION,
             exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
             exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
+            exp.OnCluster: exp.Properties.Location.POST_NAME,
         }
 
         JOIN_HINTS = False
@@ -299,6 +317,18 @@ class ClickHouse(Dialect):
         EXPLICIT_UNION = True
         GROUPINGS_SEP = ""
 
+        # there's no list in docs, but it can be found in Clickhouse code
+        # see `ClickHouse/src/Parsers/ParserCreate*.cpp`
+        ON_CLUSTER_TARGETS = {
+            "DATABASE",
+            "TABLE",
+            "VIEW",
+            "DICTIONARY",
+            "INDEX",
+            "FUNCTION",
+            "NAMED COLLECTION",
+        }
+
         def cte_sql(self, expression: exp.CTE) -> str:
             if isinstance(expression.this, exp.Alias):
                 return self.sql(expression, "this")
@@ -321,3 +351,21 @@ class ClickHouse(Dialect):
 
         def placeholder_sql(self, expression: exp.Placeholder) -> str:
             return f"{{{expression.name}: {self.sql(expression, 'kind')}}}"
+
+        def oncluster_sql(self, expression: exp.OnCluster) -> str:
+            return f"ON CLUSTER {self.sql(expression, 'this')}"
+
+        def createable_sql(
+            self,
+            expression: exp.Create,
+            locations: dict[exp.Properties.Location, list[exp.Property]],
+        ) -> str:
+            kind = self.sql(expression, "kind").upper()
+            if kind in self.ON_CLUSTER_TARGETS and locations.get(exp.Properties.Location.POST_NAME):
+                this_name = self.sql(expression.this, "this")
+                this_properties = " ".join(
+                    [self.sql(prop) for prop in locations[exp.Properties.Location.POST_NAME]]
+                )
+                this_schema = self.schema_columns_sql(expression.this)
+                return f"{this_name}{self.sep()}{this_properties}{self.sep()}{this_schema}"
+            return super().createable_sql(expression, locations)
diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index 890a3c3..4958bc6 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -104,6 +104,10 @@ class _Dialect(type):
         klass.byte_start, klass.byte_end = get_start_end(TokenType.BYTE_STRING)
         klass.raw_start, klass.raw_end = get_start_end(TokenType.RAW_STRING)
 
+        klass.tokenizer_class.identifiers_can_start_with_digit = (
+            klass.identifiers_can_start_with_digit
+        )
+
         return klass
 
 
@@ -111,6 +115,7 @@ class Dialect(metaclass=_Dialect):
     index_offset = 0
     unnest_column_only = False
     alias_post_tablesample = False
+    identifiers_can_start_with_digit = False
     normalize_functions: t.Optional[str] = "upper"
     null_ordering = "nulls_are_small"
 
@@ -231,6 +236,7 @@ class Dialect(metaclass=_Dialect):
                 "time_trie": self.inverse_time_trie,
                 "unnest_column_only": self.unnest_column_only,
                 "alias_post_tablesample": self.alias_post_tablesample,
+                "identifiers_can_start_with_digit": self.identifiers_can_start_with_digit,
                 "normalize_functions": self.normalize_functions,
                 "null_ordering": self.null_ordering,
                 **opts,
@@ -443,7 +449,7 @@ def date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
     unit = seq_get(args, 0)
     this = seq_get(args, 1)
 
-    if isinstance(this, exp.Cast) and this.is_type(exp.DataType.Type.DATE):
+    if isinstance(this, exp.Cast) and this.is_type("date"):
         return exp.DateTrunc(unit=unit, this=this)
 
     return exp.TimestampTrunc(this=this, unit=unit)
@@ -468,6 +474,25 @@ def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
     )
 
 
+def left_to_substring_sql(self: Generator, expression: exp.Left) -> str:
+    expression = expression.copy()
+    return self.sql(
+        exp.Substring(
+            this=expression.this, start=exp.Literal.number(1), length=expression.expression
+        )
+    )
+
+
+def right_to_substring_sql(self: Generator, expression: exp.Left) -> str:
+    expression = expression.copy()
+    return self.sql(
+        exp.Substring(
+            this=expression.this,
+            start=exp.Length(this=expression.this) - exp.paren(expression.expression - 1),
+        )
+    )
+
+
 def timestrtotime_sql(self: Generator, expression: exp.TimeStrToTime) -> str:
     return f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
 
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index 662882d..f31da73 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -71,7 +71,7 @@ def _struct_sql(self: generator.Generator, expression: exp.Struct) -> str:
 
 
 def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
-    if expression.this == exp.DataType.Type.ARRAY:
+    if expression.is_type("array"):
         return f"{self.expressions(expression, flat=True)}[]"
     return self.datatype_sql(expression)
 
diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py
index fbd626a..650a1e1 100644
--- a/sqlglot/dialects/hive.py
+++ b/sqlglot/dialects/hive.py
@@ -9,6 +9,7 @@ from sqlglot.dialects.dialect import (
     create_with_partitions_sql,
     format_time_lambda,
     if_sql,
+    left_to_substring_sql,
     locate_to_strposition,
     max_or_greatest,
     min_or_least,
@@ -17,6 +18,7 @@ from sqlglot.dialects.dialect import (
     no_safe_divide_sql,
     no_trycast_sql,
     rename_func,
+    right_to_substring_sql,
     strposition_to_locate_sql,
     struct_extract_sql,
     timestrtotime_sql,
@@ -89,7 +91,7 @@ def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str:
 
     annotate_types(this)
 
-    if this.type.is_type(exp.DataType.Type.JSON):
+    if this.type.is_type("json"):
         return self.sql(this)
     return self.func("TO_JSON", this, expression.args.get("options"))
 
@@ -149,6 +151,7 @@ def _to_date_sql(self: generator.Generator, expression: exp.TsOrDsToDate) -> str:
 
 class Hive(Dialect):
     alias_post_tablesample = True
+    identifiers_can_start_with_digit = True
 
     time_mapping = {
         "y": "%Y",
@@ -190,7 +193,6 @@ class Hive(Dialect):
         IDENTIFIERS = ["`"]
         STRING_ESCAPES = ["\\"]
         ENCODE = "utf-8"
-        IDENTIFIER_CAN_START_WITH_DIGIT = True
 
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
@@ -276,6 +278,39 @@ class Hive(Dialect):
             "cluster": lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
         }
 
+        def _parse_types(
+            self, check_func: bool = False, schema: bool = False
+        ) -> t.Optional[exp.Expression]:
+            """
+            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
+            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
+
+                spark-sql (default)> select cast(1234 as varchar(2));
+                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
+                char/varchar type and simply treats them as string type. Please use string type
+                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
+                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
+
+                1234
+                Time taken: 4.265 seconds, Fetched 1 row(s)
+
+            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
+            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
+
+            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
+            """
+            this = super()._parse_types(check_func=check_func, schema=schema)
+
+            if this and not schema:
+                return this.transform(
+                    lambda node: node.replace(exp.DataType.build("text"))
+                    if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
+                    else node,
+                    copy=False,
+                )
+
+            return this
+
     class Generator(generator.Generator):
         LIMIT_FETCH = "LIMIT"
         TABLESAMPLE_WITH_METHOD = False
@@ -323,6 +358,7 @@ class Hive(Dialect):
             exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
             exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
             exp.JSONFormat: _json_format_sql,
+            exp.Left: left_to_substring_sql,
             exp.Map: var_map_sql,
             exp.Max: max_or_greatest,
             exp.Min: min_or_least,
@@ -332,6 +368,7 @@ class Hive(Dialect):
             exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
             exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
             exp.RegexpSplit: rename_func("SPLIT"),
+            exp.Right: right_to_substring_sql,
             exp.SafeDivide: no_safe_divide_sql,
             exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
             exp.SetAgg: rename_func("COLLECT_SET"),
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 2b41860..75023ff 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -186,9 +186,6 @@ class MySQL(Dialect):
             "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "mysql"),
             "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
             "INSTR": lambda args: exp.StrPosition(substr=seq_get(args, 1), this=seq_get(args, 0)),
-            "LEFT": lambda args: exp.Substring(
-                this=seq_get(args, 0), start=exp.Literal.number(1), length=seq_get(args, 1)
-            ),
             "LOCATE": locate_to_strposition,
             "STR_TO_DATE": _str_to_date,
         }
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index ab61880..8d84024 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -18,7 +18,9 @@ from sqlglot.dialects.dialect import (
     rename_func,
     str_position_sql,
     timestamptrunc_sql,
+    timestrtotime_sql,
     trim_sql,
+    ts_or_ds_to_date_sql,
 )
 from sqlglot.helper import seq_get
 from sqlglot.parser import binary_range_parser
@@ -104,7 +106,7 @@ def _string_agg_sql(self: generator.Generator, expression: exp.GroupConcat) -> str:
 
 
 def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
-    if expression.this == exp.DataType.Type.ARRAY:
+    if expression.is_type("array"):
         return f"{self.expressions(expression, flat=True)}[]"
     return self.datatype_sql(expression)
 
@@ -353,12 +355,13 @@ class Postgres(Dialect):
             exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.Substring: _substring_sql,
             exp.TimestampTrunc: timestamptrunc_sql,
-            exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
+            exp.TimeStrToTime: timestrtotime_sql,
             exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.TableSample: no_tablesample_sql,
             exp.ToChar: lambda self, e: self.function_fallback_sql(e),
             exp.Trim: trim_sql,
             exp.TryCast: no_trycast_sql,
+            exp.TsOrDsToDate: ts_or_ds_to_date_sql("postgres"),
             exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
             exp.DataType: _datatype_sql,
             exp.GroupConcat: _string_agg_sql,
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 52a04a4..d839864 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -8,10 +8,12 @@ from sqlglot.dialects.dialect import (
     date_trunc_to_time,
     format_time_lambda,
     if_sql,
+    left_to_substring_sql,
     no_ilike_sql,
     no_pivot_sql,
     no_safe_divide_sql,
     rename_func,
+    right_to_substring_sql,
     struct_extract_sql,
     timestamptrunc_sql,
     timestrtotime_sql,
@@ -30,7 +32,7 @@ def _approx_distinct_sql(self: generator.Generator, expression: exp.ApproxDistinct) -> str:
 
 def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
     sql = self.datatype_sql(expression)
-    if expression.this == exp.DataType.Type.TIMESTAMPTZ:
+    if expression.is_type("timestamptz"):
         sql = f"{sql} WITH TIME ZONE"
     return sql
 
@@ -240,6 +242,7 @@ class Presto(Dialect):
         INTERVAL_ALLOWS_PLURAL_FORM = False
         JOIN_HINTS = False
         TABLE_HINTS = False
+        IS_BOOL = False
         STRUCT_DELIMITER = ("(", ")")
 
         PROPERTIES_LOCATION = {
@@ -272,6 +275,7 @@ class Presto(Dialect):
             exp.BitwiseOr: lambda self, e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
             exp.BitwiseRightShift: lambda self, e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
             exp.BitwiseXor: lambda self, e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
+            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
             exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
             exp.DataType: _datatype_sql,
             exp.DateAdd: lambda self, e: self.func(
@@ -292,11 +296,13 @@ class Presto(Dialect):
             exp.ILike: no_ilike_sql,
             exp.Initcap: _initcap_sql,
             exp.Lateral: _explode_to_unnest_sql,
+            exp.Left: left_to_substring_sql,
             exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
             exp.LogicalAnd: rename_func("BOOL_AND"),
             exp.LogicalOr: rename_func("BOOL_OR"),
             exp.Pivot: no_pivot_sql,
             exp.Quantile: _quantile_sql,
+            exp.Right: right_to_substring_sql,
             exp.SafeDivide: no_safe_divide_sql,
             exp.Schema: _schema_sql,
             exp.Select: transforms.preprocess(
@@ -319,6 +325,7 @@ class Presto(Dialect):
             exp.TimeStrToUnix: lambda self, e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.time_format}))",
             exp.TimeToStr: lambda self, e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.TimeToUnix: rename_func("TO_UNIXTIME"),
+            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
             exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
             exp.TsOrDsAdd: _ts_or_ds_add_sql,
             exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
@@ -356,7 +363,7 @@ class Presto(Dialect):
             else:
                 target_type = None
 
-            if target_type and target_type.is_type(exp.DataType.Type.TIMESTAMP):
+            if target_type and target_type.is_type("timestamp"):
                 to = target_type.copy()
 
                 if target_type is start.to:
diff --git a/sqlglot/dialects/redshift.py b/sqlglot/dialects/redshift.py
index 55e393a..b0a6774 100644
--- a/sqlglot/dialects/redshift.py
+++ b/sqlglot/dialects/redshift.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import typing as t
 
 from sqlglot import exp, transforms
+from sqlglot.dialects.dialect import rename_func
 from sqlglot.dialects.postgres import Postgres
 from sqlglot.helper import seq_get
 from sqlglot.tokens import TokenType
@@ -24,26 +25,29 @@ class Redshift(Postgres):
         FUNCTIONS = {
             **Postgres.Parser.FUNCTIONS,
             "DATEADD": lambda args: exp.DateAdd(
-                this=seq_get(args, 2),
+                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
                 expression=seq_get(args, 1),
                 unit=seq_get(args, 0),
             ),
             "DATEDIFF": lambda args: exp.DateDiff(
-                this=seq_get(args, 2),
-                expression=seq_get(args, 1),
+                this=exp.TsOrDsToDate(this=seq_get(args, 2)),
+                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
                 unit=seq_get(args, 0),
             ),
             "NVL": exp.Coalesce.from_arg_list,
+            "STRTOL": exp.FromBase.from_arg_list,
         }
 
         CONVERT_TYPE_FIRST = True
 
-        def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
-            this = super()._parse_types(check_func=check_func)
+        def _parse_types(
+            self, check_func: bool = False, schema: bool = False
+        ) -> t.Optional[exp.Expression]:
+            this = super()._parse_types(check_func=check_func, schema=schema)
 
             if (
                 isinstance(this, exp.DataType)
-                and this.this == exp.DataType.Type.VARCHAR
+                and this.is_type("varchar")
                 and this.expressions
                 and this.expressions[0].this == exp.column("MAX")
             ):
@@ -99,10 +103,12 @@ class Redshift(Postgres):
             ),
             exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
             exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+            exp.FromBase: rename_func("STRTOL"),
             exp.JSONExtract: _json_sql,
             exp.JSONExtractScalar: _json_sql,
             exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
             exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+            exp.TsOrDsToDate: lambda self, e: self.sql(e.this),
         }
 
         # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
@@ -158,7 +164,7 @@ class Redshift(Postgres):
         without precision we convert it to `VARCHAR(max)` and if it does have precision then we just
         convert `TEXT` to `VARCHAR`.
         """
-        if expression.this == exp.DataType.Type.TEXT:
+        if expression.is_type("text"):
             expression = expression.copy()
             expression.set("this", exp.DataType.Type.VARCHAR)
             precision = expression.args.get("expressions")
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 756e8e9..821d991 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -153,9 +153,9 @@ def _nullifzero_to_if(args: t.List) -> exp.Expression:
 
 
 def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
-    if expression.this == exp.DataType.Type.ARRAY:
+    if expression.is_type("array"):
         return "ARRAY"
-    elif expression.this == exp.DataType.Type.MAP:
+    elif expression.is_type("map"):
         return "OBJECT"
     return self.datatype_sql(expression)
 
diff --git a/sqlglot/dialects/spark2.py b/sqlglot/dialects/spark2.py
index 912b86b..bf24240 100644
--- a/sqlglot/dialects/spark2.py
+++ b/sqlglot/dialects/spark2.py
@@ -110,11 +110,6 @@ class Spark2(Hive):
             **Hive.Parser.FUNCTIONS,
             "MAP_FROM_ARRAYS": exp.Map.from_arg_list,
             "TO_UNIX_TIMESTAMP": exp.StrToUnix.from_arg_list,
-            "LEFT": lambda args: exp.Substring(
-                this=seq_get(args, 0),
-                start=exp.Literal.number(1),
-                length=seq_get(args, 1),
-            ),
             "SHIFTLEFT": lambda args: exp.BitwiseLeftShift(
                 this=seq_get(args, 0),
                 expression=seq_get(args, 1),
@@ -123,14 +118,6 @@ class Spark2(Hive):
                 this=seq_get(args, 0),
                 expression=seq_get(args, 1),
             ),
-            "RIGHT": lambda args: exp.Substring(
-                this=seq_get(args, 0),
-                start=exp.Sub(
-                    this=exp.Length(this=seq_get(args, 0)),
-                    expression=exp.Add(this=seq_get(args, 1), expression=exp.Literal.number(1)),
-                ),
-                length=seq_get(args, 1),
-            ),
             "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
             "IIF": exp.If.from_arg_list,
             "AGGREGATE": exp.Reduce.from_arg_list,
@@ -240,17 +227,17 @@ class Spark2(Hive):
         TRANSFORMS.pop(exp.ArrayJoin)
         TRANSFORMS.pop(exp.ArraySort)
         TRANSFORMS.pop(exp.ILike)
+        TRANSFORMS.pop(exp.Left)
+        TRANSFORMS.pop(exp.Right)
 
         WRAP_DERIVED_VALUES = False
         CREATE_FUNCTION_RETURN_AS = False
 
         def cast_sql(self, expression: exp.Cast) -> str:
-            if isinstance(expression.this, exp.Cast) and expression.this.is_type(
-                exp.DataType.Type.JSON
-            ):
+            if isinstance(expression.this, exp.Cast) and expression.this.is_type("json"):
                 schema = f"'{self.sql(expression, 'to')}'"
                 return self.func("FROM_JSON", expression.this.this, schema)
-            if expression.to.is_type(exp.DataType.Type.JSON):
+            if expression.is_type("json"):
                 return self.func("TO_JSON", expression.this)
 
             return super(Hive.Generator, self).cast_sql(expression)
@@ -260,7 +247,7 @@ class Spark2(Hive):
                 expression,
                 sep=": "
                 if isinstance(expression.parent, exp.DataType)
-                and expression.parent.is_type(exp.DataType.Type.STRUCT)
+                and expression.parent.is_type("struct")
                 else sep,
             )
 
diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py
index 56e7773..4e800b0 100644
--- a/sqlglot/dialects/sqlite.py
+++ b/sqlglot/dialects/sqlite.py
@@ -132,7 +132,7 @@ class SQLite(Dialect):
         LIMIT_FETCH = "LIMIT"
 
         def cast_sql(self, expression: exp.Cast) -> str:
-            if expression.to.this == exp.DataType.Type.DATE:
+            if expression.is_type("date"):
                 return self.func("DATE", expression.this)
             return super().cast_sql(expression)
 
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index 9b39178..514aecb 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -183,3 +183,20 @@ class Teradata(Dialect):
             each_sql = f" EACH {each_sql}" if each_sql else ""
 
             return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
+
+        def createable_sql(
+            self,
+            expression: exp.Create,
+            locations: dict[exp.Properties.Location, list[exp.Property]],
+        ) -> str:
+            kind = self.sql(expression, "kind").upper()
+            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
+                this_name = self.sql(expression.this, "this")
+                this_properties = self.properties(
+                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
+                    wrapped=False,
+                    prefix=",",
+                )
+                this_schema = self.schema_columns_sql(expression.this)
+                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
+            return super().createable_sql(expression, locations)