commit    cf606e6b9c2bd703d35537ef8880a8b80edf52a5 (patch)
author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-03 20:08:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-06-03 20:08:03 +0000
tree      ac3c8e940fabdaa513920df2a58284fe4e8c84ee /tests/dialects
parent    Releasing debian version 24.0.0-1. (diff)
download  sqlglot-cf606e6b9c2bd703d35537ef8880a8b80edf52a5.tar.xz
          sqlglot-cf606e6b9c2bd703d35537ef8880a8b80edf52a5.zip

Merging upstream version 24.1.0.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/dialects')
-rw-r--r--  tests/dialects/test_clickhouse.py   1
-rw-r--r--  tests/dialects/test_databricks.py   3
-rw-r--r--  tests/dialects/test_duckdb.py       2
-rw-r--r--  tests/dialects/test_hive.py         4
-rw-r--r--  tests/dialects/test_postgres.py    24
-rw-r--r--  tests/dialects/test_presto.py       2
-rw-r--r--  tests/dialects/test_redshift.py    11
-rw-r--r--  tests/dialects/test_snowflake.py   88
-rw-r--r--  tests/dialects/test_spark.py        2
-rw-r--r--  tests/dialects/test_tsql.py        10
10 files changed, 121 insertions, 26 deletions
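
A note on the test helpers used throughout this diff — a minimal sketch, assuming the semantics of sqlglot's Validator base class in tests/dialects/test_dialect.py (the real helpers carry more options): validate_identity round-trips SQL through one dialect, validate_all transpiles between the dialects in its read/write mappings.

import sqlglot

def validate_identity(sql, expected=None, dialect="clickhouse"):
    # Parse in the dialect and regenerate; the output must equal the
    # input, or the explicitly given canonical form.
    assert sqlglot.transpile(sql, read=dialect, write=dialect)[0] == (expected or sql)

def validate_all(sql, dialect="clickhouse", write=None):
    # The source SQL must generate the expected string in each target dialect.
    for target, expected in (write or {}).items():
        assert sqlglot.transpile(sql, read=dialect, write=target)[0] == expected
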
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 15adda8..72634a8 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -42,6 +42,7 @@ class TestClickhouse(Validator):
self.assertEqual(expr.sql(dialect="clickhouse"), "COUNT(x)")
self.assertIsNone(expr._meta)
+ self.validate_identity("SELECT CAST(x AS Tuple(String, Array(Nullable(Float64))))")
self.validate_identity("countIf(x, y)")
self.validate_identity("x = y")
self.validate_identity("x <> y")
diff --git a/tests/dialects/test_databricks.py b/tests/dialects/test_databricks.py
index f050cfa..9ef3b86 100644
--- a/tests/dialects/test_databricks.py
+++ b/tests/dialects/test_databricks.py
@@ -9,6 +9,7 @@ class TestDatabricks(Validator):
def test_databricks(self):
self.validate_identity("DESCRIBE HISTORY a.b")
self.validate_identity("DESCRIBE history.tbl")
+ self.validate_identity("CREATE TABLE t (a STRUCT<c: MAP<STRING, STRING>>)")
self.validate_identity("CREATE TABLE t (c STRUCT<interval: DOUBLE COMMENT 'aaa'>)")
self.validate_identity("CREATE TABLE my_table TBLPROPERTIES (a.b=15)")
self.validate_identity("CREATE TABLE my_table TBLPROPERTIES ('a.b'=15)")
@@ -50,7 +51,7 @@ class TestDatabricks(Validator):
"TRUNCATE TABLE t1 PARTITION(age = 10, name = 'test', city LIKE 'LA')"
)
self.validate_identity(
- "COPY INTO target FROM `s3://link` FILEFORMAT = AVRO VALIDATE = ALL FILES = ('file1', 'file2') FORMAT_OPTIONS(opt1 = TRUE, opt2 = 'test') COPY_OPTIONS(opt3 = 5)"
+ "COPY INTO target FROM `s3://link` FILEFORMAT = AVRO VALIDATE = ALL FILES = ('file1', 'file2') FORMAT_OPTIONS ('opt1'='true', 'opt2'='test') COPY_OPTIONS ('mergeSchema'='true')"
)
self.validate_all(
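
Usage sketch of the Databricks identity added above: nested STRUCT/MAP column types round-trip through CREATE TABLE.

import sqlglot

sql = "CREATE TABLE t (a STRUCT<c: MAP<STRING, STRING>>)"
assert sqlglot.transpile(sql, read="databricks", write="databricks")[0] == sql
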
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 03dea93..bbf665d 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -735,7 +735,7 @@ class TestDuckDB(Validator):
)
self.validate_identity(
- "COPY lineitem FROM 'lineitem.ndjson' WITH (FORMAT JSON, DELIMITER ',', AUTO_DETECT TRUE, COMPRESSION SNAPPY, CODEC ZSTD, FORCE_NOT_NULL(col1, col2))"
+ "COPY lineitem FROM 'lineitem.ndjson' WITH (FORMAT JSON, DELIMITER ',', AUTO_DETECT TRUE, COMPRESSION SNAPPY, CODEC ZSTD, FORCE_NOT_NULL (col1, col2))"
)
self.validate_identity(
"COPY (SELECT 42 AS a, 'hello' AS b) TO 'query.json' WITH (FORMAT JSON, ARRAY TRUE)"
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py
index dfce446..3ebaded 100644
--- a/tests/dialects/test_hive.py
+++ b/tests/dialects/test_hive.py
@@ -406,7 +406,9 @@ class TestHive(Validator):
self.validate_identity("(VALUES (1 AS a, 2 AS b, 3))")
self.validate_identity("SELECT * FROM my_table TIMESTAMP AS OF DATE_ADD(CURRENT_DATE, -1)")
self.validate_identity("SELECT * FROM my_table VERSION AS OF DATE_ADD(CURRENT_DATE, -1)")
-
+ self.validate_identity(
+ "SELECT WEEKOFYEAR('2024-05-22'), DAYOFMONTH('2024-05-22'), DAYOFWEEK('2024-05-22')"
+ )
self.validate_identity(
"SELECT ROW() OVER (DISTRIBUTE BY x SORT BY y)",
"SELECT ROW() OVER (PARTITION BY x ORDER BY y)",
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 8ba4e96..38c262f 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -22,6 +22,7 @@ class TestPostgres(Validator):
self.assertIsInstance(expr, exp.AlterTable)
self.assertEqual(expr.sql(dialect="postgres"), alter_table_only)
+ self.validate_identity("STRING_TO_ARRAY('xx~^~yy~^~zz', '~^~', 'yy')")
self.validate_identity("SELECT x FROM t WHERE CAST($1 AS TEXT) = 'ok'")
self.validate_identity("SELECT * FROM t TABLESAMPLE SYSTEM (50) REPEATABLE (55)")
self.validate_identity("x @@ y")
@@ -328,6 +329,16 @@ class TestPostgres(Validator):
)
self.validate_all(
+ "STRING_TO_ARRAY('xx~^~yy~^~zz', '~^~', 'yy')",
+ read={
+ "doris": "SPLIT_BY_STRING('xx~^~yy~^~zz', '~^~', 'yy')",
+ },
+ write={
+ "doris": "SPLIT_BY_STRING('xx~^~yy~^~zz', '~^~', 'yy')",
+ "postgres": "STRING_TO_ARRAY('xx~^~yy~^~zz', '~^~', 'yy')",
+ },
+ )
+ self.validate_all(
"SELECT ARRAY[1, 2, 3] @> ARRAY[1, 2]",
read={
"duckdb": "SELECT ARRAY_HAS_ALL([1, 2, 3], [1, 2])",
@@ -706,6 +717,9 @@ class TestPostgres(Validator):
self.validate_identity(
"COPY (SELECT * FROM t) TO 'file' WITH (FORMAT format, HEADER MATCH, FREEZE TRUE)"
)
+ self.validate_identity("cast(a as FLOAT)", "CAST(a AS DOUBLE PRECISION)")
+ self.validate_identity("cast(a as FLOAT8)", "CAST(a AS DOUBLE PRECISION)")
+ self.validate_identity("cast(a as FLOAT4)", "CAST(a AS REAL)")
def test_ddl(self):
# Checks that user-defined types are parsed into DataType instead of Identifier
@@ -723,6 +737,8 @@ class TestPostgres(Validator):
cdef.args["kind"].assert_is(exp.DataType)
self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL DAY)")
+ self.validate_identity("CREATE TABLE t (col INT[3][5])")
+ self.validate_identity("CREATE TABLE t (col INT[3])")
self.validate_identity("CREATE INDEX IF NOT EXISTS ON t(c)")
self.validate_identity("CREATE INDEX et_vid_idx ON et(vid) INCLUDE (fid)")
self.validate_identity("CREATE INDEX idx_x ON x USING BTREE(x, y) WHERE (NOT y IS NULL)")
@@ -846,6 +862,14 @@ class TestPostgres(Validator):
"CREATE UNLOGGED TABLE foo AS WITH t(c) AS (SELECT 1) SELECT * FROM (SELECT c AS c FROM t) AS temp"
)
self.validate_identity(
+ "CREATE TABLE t (col integer ARRAY[3])",
+ "CREATE TABLE t (col INT[3])",
+ )
+ self.validate_identity(
+ "CREATE TABLE t (col integer ARRAY)",
+ "CREATE TABLE t (col INT[])",
+ )
+ self.validate_identity(
"CREATE FUNCTION x(INT) RETURNS INT SET search_path TO 'public'",
"CREATE FUNCTION x(INT) RETURNS INT SET search_path = 'public'",
)
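
Usage sketch of the Postgres/Doris mapping exercised above; per the test's read/write tables, STRING_TO_ARRAY and SPLIT_BY_STRING translate into each other.

import sqlglot

sql = "STRING_TO_ARRAY('xx~^~yy~^~zz', '~^~', 'yy')"
print(sqlglot.transpile(sql, read="postgres", write="doris")[0])
# Expected per the test: SPLIT_BY_STRING('xx~^~yy~^~zz', '~^~', 'yy')
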
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index 108e916..f1bbcc1 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -210,6 +210,8 @@ class TestPresto(Validator):
"bigquery": f"SELECT INTERVAL '1' {expected}",
"presto": f"SELECT INTERVAL '1' {expected}",
"trino": f"SELECT INTERVAL '1' {expected}",
+ "mysql": f"SELECT INTERVAL '1' {expected}",
+ "doris": f"SELECT INTERVAL '1' {expected}",
},
)
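
Usage sketch of the broadened interval test above: unit-qualified intervals now also render for MySQL and Doris. The DAY unit here is illustrative; the actual test loops over units via the elided `expected` variable.

import sqlglot

print(sqlglot.transpile("SELECT INTERVAL '1' DAY", read="presto", write="mysql")[0])
# Per the test's expectation shape: SELECT INTERVAL '1' DAY
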
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index 3925e32..ccabe2d 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -7,6 +7,13 @@ class TestRedshift(Validator):
def test_redshift(self):
self.validate_all(
+ "SELECT SPLIT_TO_ARRAY('12,345,6789')",
+ write={
+ "postgres": "SELECT STRING_TO_ARRAY('12,345,6789', ',')",
+ "redshift": "SELECT SPLIT_TO_ARRAY('12,345,6789', ',')",
+ },
+ )
+ self.validate_all(
"GETDATE()",
read={
"duckdb": "CURRENT_TIMESTAMP",
@@ -473,6 +480,10 @@ FROM (
self.validate_identity("CREATE TABLE table_backup BACKUP YES AS SELECT * FROM event")
self.validate_identity("CREATE TABLE table_backup (i INTEGER, b VARCHAR) BACKUP NO")
self.validate_identity("CREATE TABLE table_backup (i INTEGER, b VARCHAR) BACKUP YES")
+ self.validate_identity(
+ "select foo, bar from table_1 minus select foo, bar from table_2",
+ "SELECT foo, bar FROM table_1 EXCEPT SELECT foo, bar FROM table_2",
+ )
def test_create_table_like(self):
self.validate_identity(
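
Usage sketch of the new Redshift identity above: MINUS parses as a set operation and is rendered with its canonical EXCEPT keyword.

import sqlglot

sql = "select foo, bar from table_1 minus select foo, bar from table_2"
print(sqlglot.transpile(sql, read="redshift", write="redshift")[0])
# SELECT foo, bar FROM table_1 EXCEPT SELECT foo, bar FROM table_2
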
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index f8c2ea1..ba85719 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -2,6 +2,7 @@ from unittest import mock
from sqlglot import UnsupportedError, exp, parse_one
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers
+from sqlglot.optimizer.qualify_columns import quote_identifiers
from tests.dialects.test_dialect import Validator
@@ -48,6 +49,8 @@ WHERE
)""",
)
+ self.validate_identity("SELECT number").selects[0].assert_is(exp.Column)
+ self.validate_identity("INTERVAL '4 years, 5 months, 3 hours'")
self.validate_identity("ALTER TABLE table1 CLUSTER BY (name DESC)")
self.validate_identity("SELECT rename, replace")
self.validate_identity("SELECT TIMEADD(HOUR, 2, CAST('09:05:03' AS TIME))")
@@ -111,6 +114,10 @@ WHERE
"SELECT * FROM DATA AS DATA_L ASOF JOIN DATA AS DATA_R MATCH_CONDITION (DATA_L.VAL > DATA_R.VAL) ON DATA_L.ID = DATA_R.ID"
)
self.validate_identity(
+ "CURRENT_TIMESTAMP - INTERVAL '1 w' AND (1 = 1)",
+ "CURRENT_TIMESTAMP() - INTERVAL '1 WEEK' AND (1 = 1)",
+ )
+ self.validate_identity(
"REGEXP_REPLACE('target', 'pattern', '\n')",
"REGEXP_REPLACE('target', 'pattern', '\\n')",
)
@@ -1186,6 +1193,25 @@ WHERE
)
def test_ddl(self):
+ for constraint_prefix in ("WITH ", ""):
+ with self.subTest(f"Constraint prefix: {constraint_prefix}"):
+ self.validate_identity(
+ f"CREATE TABLE t (id INT {constraint_prefix}MASKING POLICY p)",
+ "CREATE TABLE t (id INT MASKING POLICY p)",
+ )
+ self.validate_identity(
+ f"CREATE TABLE t (id INT {constraint_prefix}MASKING POLICY p USING (c1, c2, c3))",
+ "CREATE TABLE t (id INT MASKING POLICY p USING (c1, c2, c3))",
+ )
+ self.validate_identity(
+ f"CREATE TABLE t (id INT {constraint_prefix}PROJECTION POLICY p)",
+ "CREATE TABLE t (id INT PROJECTION POLICY p)",
+ )
+ self.validate_identity(
+ f"CREATE TABLE t (id INT {constraint_prefix}TAG (key1='value_1', key2='value_2'))",
+ "CREATE TABLE t (id INT TAG (key1='value_1', key2='value_2'))",
+ )
+
self.validate_identity(
"""create external table et2(
col1 date as (parse_json(metadata$external_table_partition):COL1::date),
@@ -1211,6 +1237,9 @@ WHERE
"CREATE OR REPLACE TAG IF NOT EXISTS cost_center COMMENT='cost_center tag'"
).this.assert_is(exp.Identifier)
self.validate_identity(
+ "ALTER TABLE db_name.schmaName.tblName ADD COLUMN COLUMN_1 VARCHAR NOT NULL TAG (key1='value_1')"
+ )
+ self.validate_identity(
"DROP FUNCTION my_udf (OBJECT(city VARCHAR, zipcode DECIMAL(38, 0), val ARRAY(BOOLEAN)))"
)
self.validate_identity(
@@ -1283,6 +1312,20 @@ WHERE
write={"snowflake": "CREATE TABLE a (b INT)"},
)
+ for action in ("SET", "DROP"):
+ with self.subTest(f"ALTER COLUMN {action} NOT NULL"):
+ self.validate_all(
+ f"""
+ ALTER TABLE a
+ ALTER COLUMN my_column {action} NOT NULL;
+ """,
+ write={
+ "snowflake": f"ALTER TABLE a ALTER COLUMN my_column {action} NOT NULL",
+ "duckdb": f"ALTER TABLE a ALTER COLUMN my_column {action} NOT NULL",
+ "postgres": f"ALTER TABLE a ALTER COLUMN my_column {action} NOT NULL",
+ },
+ )
+
def test_user_defined_functions(self):
self.validate_all(
"CREATE FUNCTION a(x DATE, y BIGINT) RETURNS ARRAY LANGUAGE JAVASCRIPT AS $$ SELECT 1 $$",
@@ -1880,22 +1923,21 @@ STORAGE_ALLOWED_LOCATIONS=('s3://mybucket1/path1/', 's3://mybucket2/path2/')""",
def test_copy(self):
self.validate_identity("COPY INTO test (c1) FROM (SELECT $1.c1 FROM @mystage)")
self.validate_identity(
- """COPY INTO temp FROM @random_stage/path/ FILE_FORMAT = (TYPE = CSV FIELD_DELIMITER = '|' NULL_IF = () FIELD_OPTIONALLY_ENCLOSED_BY = '"' TIMESTAMP_FORMAT = 'TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' DATE_FORMAT = 'TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' BINARY_FORMAT = BASE64) VALIDATION_MODE = 'RETURN_3_ROWS'"""
+ """COPY INTO temp FROM @random_stage/path/ FILE_FORMAT = (TYPE=CSV FIELD_DELIMITER='|' NULL_IF=('str1', 'str2') FIELD_OPTIONALLY_ENCLOSED_BY='"' TIMESTAMP_FORMAT='TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' DATE_FORMAT='TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' BINARY_FORMAT=BASE64) VALIDATION_MODE = 'RETURN_3_ROWS'"""
)
self.validate_identity(
- """COPY INTO load1 FROM @%load1/data1/ FILES = ('test1.csv', 'test2.csv') FORCE = TRUE"""
+ """COPY INTO load1 FROM @%load1/data1/ CREDENTIALS = (AWS_KEY_ID='id' AWS_SECRET_KEY='key' AWS_TOKEN='token') FILES = ('test1.csv', 'test2.csv') FORCE = TRUE"""
)
self.validate_identity(
- """COPY INTO mytable FROM 'azure://myaccount.blob.core.windows.net/mycontainer/data/files' CREDENTIALS = (AZURE_SAS_TOKEN = 'token') ENCRYPTION = (TYPE = 'AZURE_CSE' MASTER_KEY = 'kPx...') FILE_FORMAT = (FORMAT_NAME = my_csv_format)"""
+ """COPY INTO mytable FROM 'azure://myaccount.blob.core.windows.net/mycontainer/data/files' CREDENTIALS = (AZURE_SAS_TOKEN='token') ENCRYPTION = (TYPE='AZURE_CSE' MASTER_KEY='kPx...') FILE_FORMAT = (FORMAT_NAME=my_csv_format)"""
)
self.validate_identity(
- """COPY INTO mytable (col1, col2) FROM 's3://mybucket/data/files' FILES = ('file1', 'file2') PATTERN = 'pattern' FILE_FORMAT = (FORMAT_NAME = my_csv_format NULL_IF = ('str1', 'str2')) PARSE_HEADER = TRUE"""
+ """COPY INTO mytable (col1, col2) FROM 's3://mybucket/data/files' STORAGE_INTEGRATION = "storage" ENCRYPTION = (TYPE='NONE' MASTER_KEY='key') FILES = ('file1', 'file2') PATTERN = 'pattern' FILE_FORMAT = (FORMAT_NAME=my_csv_format NULL_IF=('')) PARSE_HEADER = TRUE"""
)
self.validate_all(
"""COPY INTO 's3://example/data.csv'
FROM EXTRA.EXAMPLE.TABLE
- credentials = (x)
- STORAGE_INTEGRATION = S3_INTEGRATION
+ CREDENTIALS = ()
FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = ('') FIELD_OPTIONALLY_ENCLOSED_BY = '"')
HEADER = TRUE
OVERWRITE = TRUE
@@ -1904,22 +1946,20 @@ STORAGE_ALLOWED_LOCATIONS=('s3://mybucket1/path1/', 's3://mybucket2/path2/')""",
write={
"": """COPY INTO 's3://example/data.csv'
FROM EXTRA.EXAMPLE.TABLE
-CREDENTIALS = (x) WITH (
- STORAGE_INTEGRATION S3_INTEGRATION,
- FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = (
+CREDENTIALS = () WITH (
+ FILE_FORMAT = (TYPE=CSV COMPRESSION=NONE NULL_IF=(
''
- ) FIELD_OPTIONALLY_ENCLOSED_BY = '"'),
+ ) FIELD_OPTIONALLY_ENCLOSED_BY='"'),
HEADER TRUE,
OVERWRITE TRUE,
SINGLE TRUE
)""",
"snowflake": """COPY INTO 's3://example/data.csv'
FROM EXTRA.EXAMPLE.TABLE
-CREDENTIALS = (x)
-STORAGE_INTEGRATION = S3_INTEGRATION
-FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = (
+CREDENTIALS = ()
+FILE_FORMAT = (TYPE=CSV COMPRESSION=NONE NULL_IF=(
''
-) FIELD_OPTIONALLY_ENCLOSED_BY = '"')
+) FIELD_OPTIONALLY_ENCLOSED_BY='"')
HEADER = TRUE
OVERWRITE = TRUE
SINGLE = TRUE""",
@@ -1929,19 +1969,27 @@ SINGLE = TRUE""",
self.validate_all(
"""COPY INTO 's3://example/data.csv'
FROM EXTRA.EXAMPLE.TABLE
- credentials = (x)
STORAGE_INTEGRATION = S3_INTEGRATION
- FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = ('') FIELD_OPTIONALLY_ENCLOSED_BY = '"')
+ FILE_FORMAT = (TYPE=CSV COMPRESSION=NONE NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"')
HEADER = TRUE
OVERWRITE = TRUE
SINGLE = TRUE
""",
write={
- "": """COPY INTO 's3://example/data.csv' FROM EXTRA.EXAMPLE.TABLE CREDENTIALS = (x) WITH (STORAGE_INTEGRATION S3_INTEGRATION, FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = ('') FIELD_OPTIONALLY_ENCLOSED_BY = '"'), HEADER TRUE, OVERWRITE TRUE, SINGLE TRUE)""",
- "snowflake": """COPY INTO 's3://example/data.csv' FROM EXTRA.EXAMPLE.TABLE CREDENTIALS = (x) STORAGE_INTEGRATION = S3_INTEGRATION FILE_FORMAT = (TYPE = CSV COMPRESSION = NONE NULL_IF = ('') FIELD_OPTIONALLY_ENCLOSED_BY = '"') HEADER = TRUE OVERWRITE = TRUE SINGLE = TRUE""",
+ "": """COPY INTO 's3://example/data.csv' FROM EXTRA.EXAMPLE.TABLE STORAGE_INTEGRATION = S3_INTEGRATION WITH (FILE_FORMAT = (TYPE=CSV COMPRESSION=NONE NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"'), HEADER TRUE, OVERWRITE TRUE, SINGLE TRUE)""",
+ "snowflake": """COPY INTO 's3://example/data.csv' FROM EXTRA.EXAMPLE.TABLE STORAGE_INTEGRATION = S3_INTEGRATION FILE_FORMAT = (TYPE=CSV COMPRESSION=NONE NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"') HEADER = TRUE OVERWRITE = TRUE SINGLE = TRUE""",
},
)
+ copy_ast = parse_one(
+ """COPY INTO 's3://example/contacts.csv' FROM db.tbl STORAGE_INTEGRATION = PROD_S3_SIDETRADE_INTEGRATION FILE_FORMAT = (FORMAT_NAME=my_csv_format TYPE=CSV COMPRESSION=NONE NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"') MATCH_BY_COLUMN_NAME = CASE_SENSITIVE OVERWRITE = TRUE SINGLE = TRUE INCLUDE_METADATA = (col1 = METADATA$START_SCAN_TIME)""",
+ read="snowflake",
+ )
+ self.assertEqual(
+ quote_identifiers(copy_ast, dialect="snowflake").sql(dialect="snowflake"),
+ """COPY INTO 's3://example/contacts.csv' FROM "db"."tbl" STORAGE_INTEGRATION = "PROD_S3_SIDETRADE_INTEGRATION" FILE_FORMAT = (FORMAT_NAME="my_csv_format" TYPE=CSV COMPRESSION=NONE NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"') MATCH_BY_COLUMN_NAME = CASE_SENSITIVE OVERWRITE = TRUE SINGLE = TRUE INCLUDE_METADATA = ("col1" = "METADATA$START_SCAN_TIME")""",
+ )
+
def test_querying_semi_structured_data(self):
self.validate_identity("SELECT $1")
self.validate_identity("SELECT $1.elem")
@@ -1958,10 +2006,10 @@ SINGLE = TRUE""",
self.validate_identity("ALTER TABLE table1 SET TAG foo.bar = 'baz'")
self.validate_identity("ALTER TABLE IF EXISTS foo SET TAG a = 'a', b = 'b', c = 'c'")
self.validate_identity(
- """ALTER TABLE tbl SET STAGE_FILE_FORMAT = (TYPE = CSV FIELD_DELIMITER = '|' NULL_IF = () FIELD_OPTIONALLY_ENCLOSED_BY = '"' TIMESTAMP_FORMAT = 'TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' DATE_FORMAT = 'TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' BINARY_FORMAT = BASE64)""",
+ """ALTER TABLE tbl SET STAGE_FILE_FORMAT = (TYPE=CSV FIELD_DELIMITER='|' NULL_IF=('') FIELD_OPTIONALLY_ENCLOSED_BY='"' TIMESTAMP_FORMAT='TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' DATE_FORMAT='TZHTZM YYYY-MM-DD HH24:MI:SS.FF9' BINARY_FORMAT=BASE64)""",
)
self.validate_identity(
- """ALTER TABLE tbl SET STAGE_COPY_OPTIONS = (ON_ERROR = SKIP_FILE SIZE_LIMIT = 5 PURGE = TRUE MATCH_BY_COLUMN_NAME = CASE_SENSITIVE)"""
+ """ALTER TABLE tbl SET STAGE_COPY_OPTIONS = (ON_ERROR=SKIP_FILE SIZE_LIMIT=5 PURGE=TRUE MATCH_BY_COLUMN_NAME=CASE_SENSITIVE)"""
)
self.validate_identity("ALTER TABLE foo UNSET TAG a, b, c")
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 069ae42..ecc152f 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -325,7 +325,7 @@ TBLPROPERTIES (
write={
"clickhouse": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
"databricks": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
- "doris": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
+ "doris": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS `name` UNION ALL SELECT NULL AS id, 'jake' AS `name`) SELECT COUNT(DISTINCT id, `name`) AS cnt FROM tbl",
"duckdb": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT CASE WHEN id IS NULL THEN NULL WHEN name IS NULL THEN NULL ELSE (id, name) END) AS cnt FROM tbl",
"hive": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
"mysql": "WITH tbl AS (SELECT 1 AS id, 'eggy' AS name UNION ALL SELECT NULL AS id, 'jake' AS name) SELECT COUNT(DISTINCT id, name) AS cnt FROM tbl",
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 45a4657..92adf7a 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -31,7 +31,7 @@ class TestTSQL(Validator):
self.validate_identity("CAST(x AS int) OR y", "CAST(x AS INTEGER) <> 0 OR y <> 0")
self.validate_identity("TRUNCATE TABLE t1 WITH (PARTITIONS(1, 2 TO 5, 10 TO 20, 84))")
self.validate_identity(
- "COPY INTO test_1 FROM 'path' WITH (FILE_TYPE = 'CSV', CREDENTIAL = (IDENTITY = 'Shared Access Signature', SECRET = 'token'), FIELDTERMINATOR = ';', ROWTERMINATOR = '0X0A', ENCODING = 'UTF8', DATEFORMAT = 'ymd', MAXERRORS = 10, ERRORFILE = 'errorsfolder', IDENTITY_INSERT = 'ON')"
+ "COPY INTO test_1 FROM 'path' WITH (FORMAT_NAME = test, FILE_TYPE = 'CSV', CREDENTIAL = (IDENTITY='Shared Access Signature', SECRET='token'), FIELDTERMINATOR = ';', ROWTERMINATOR = '0X0A', ENCODING = 'UTF8', DATEFORMAT = 'ymd', MAXERRORS = 10, ERRORFILE = 'errorsfolder', IDENTITY_INSERT = 'ON')"
)
self.validate_all(
@@ -1093,7 +1093,13 @@ WHERE
self.validate_all("LEN('x')", write={"tsql": "LEN('x')", "spark": "LENGTH('x')"})
def test_replicate(self):
- self.validate_all("REPLICATE('x', 2)", write={"spark": "REPEAT('x', 2)"})
+ self.validate_all(
+ "REPLICATE('x', 2)",
+ write={
+ "spark": "REPEAT('x', 2)",
+ "tsql": "REPLICATE('x', 2)",
+ },
+ )
def test_isnull(self):
self.validate_all("ISNULL(x, y)", write={"spark": "COALESCE(x, y)"})