Diffstat (limited to 'tests/dialects')
-rw-r--r--  tests/dialects/test_bigquery.py      4
-rw-r--r--  tests/dialects/test_clickhouse.py    3
-rw-r--r--  tests/dialects/test_databricks.py    1
-rw-r--r--  tests/dialects/test_dialect.py       2
-rw-r--r--  tests/dialects/test_duckdb.py       17
-rw-r--r--  tests/dialects/test_hive.py          2
-rw-r--r--  tests/dialects/test_mysql.py         3
-rw-r--r--  tests/dialects/test_postgres.py     60
-rw-r--r--  tests/dialects/test_presto.py       21
-rw-r--r--  tests/dialects/test_snowflake.py    25
-rw-r--r--  tests/dialects/test_tsql.py         19
11 files changed, 141 insertions(+), 16 deletions(-)
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 16a89b8..e3fb04f 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -612,6 +612,7 @@ LANGUAGE js AS
write={
"bigquery": "SELECT DATETIME_ADD('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)",
"databricks": "SELECT TIMESTAMPADD(MILLISECOND, 1, '2023-01-01T00:00:00')",
+ "duckdb": "SELECT CAST('2023-01-01T00:00:00' AS DATETIME) + INTERVAL 1 MILLISECOND",
},
),
)
@@ -621,6 +622,7 @@ LANGUAGE js AS
write={
"bigquery": "SELECT DATETIME_SUB('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)",
"databricks": "SELECT TIMESTAMPADD(MILLISECOND, 1 * -1, '2023-01-01T00:00:00')",
+ "duckdb": "SELECT CAST('2023-01-01T00:00:00' AS DATETIME) - INTERVAL 1 MILLISECOND",
},
),
)
@@ -1016,7 +1018,7 @@ LANGUAGE js AS
write={
"bigquery": "SELECT * FROM UNNEST(['7', '14']) AS x",
"presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS _t0(x)",
- "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS _t0(x)",
+ "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS _t0(x)",
},
)
self.validate_all(
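The BigQuery hunks above add a DuckDB target for DATETIME_ADD/DATETIME_SUB and switch the Spark rendering of UNNEST to EXPLODE. A minimal sketch of driving the same conversions with sqlglot directly; the expected strings in the comments are copied from the assertions above and may differ across sqlglot versions:

    import sqlglot

    # BigQuery's DATETIME_ADD becomes plain interval arithmetic in DuckDB.
    print(sqlglot.transpile(
        "SELECT DATETIME_ADD('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)",
        read="bigquery",
        write="duckdb",
    )[0])
    # SELECT CAST('2023-01-01T00:00:00' AS DATETIME) + INTERVAL 1 MILLISECOND

    # UNNEST of an array literal now renders as EXPLODE for Spark.
    print(sqlglot.transpile(
        "SELECT * FROM UNNEST(['7', '14']) AS x",
        read="bigquery",
        write="spark",
    )[0])
    # SELECT * FROM EXPLODE(ARRAY('7', '14')) AS _t0(x)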
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 8f8c7f0..92fbd4b 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -7,6 +7,9 @@ class TestClickhouse(Validator):
dialect = "clickhouse"
def test_clickhouse(self):
+ self.validate_identity("SELECT toFloat(like)")
+ self.validate_identity("SELECT like")
+
string_types = [
"BLOB",
"LONGBLOB",
diff --git a/tests/dialects/test_databricks.py b/tests/dialects/test_databricks.py
index 86612cc..90499fb 100644
--- a/tests/dialects/test_databricks.py
+++ b/tests/dialects/test_databricks.py
@@ -7,6 +7,7 @@ class TestDatabricks(Validator):
dialect = "databricks"
def test_databricks(self):
+ self.validate_identity("ALTER TABLE labels ADD COLUMN label_score FLOAT")
self.validate_identity("DESCRIBE HISTORY a.b")
self.validate_identity("DESCRIBE history.tbl")
self.validate_identity("CREATE TABLE t (a STRUCT<c: MAP<STRING, STRING>>)")
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index e31b114..522c42c 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -628,7 +628,7 @@ class TestDialect(Validator):
write={
"duckdb": "EPOCH(STRPTIME('2020-01-01', '%Y-%m-%d'))",
"hive": "UNIX_TIMESTAMP('2020-01-01', 'yyyy-MM-dd')",
- "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('2020-01-01' AS VARCHAR), '%Y-%m-%d')), PARSE_DATETIME(CAST('2020-01-01' AS VARCHAR), 'yyyy-MM-dd')))",
+ "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('2020-01-01' AS VARCHAR), '%Y-%m-%d')), PARSE_DATETIME(DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP), '%Y-%m-%d'), 'yyyy-MM-dd')))",
"starrocks": "UNIX_TIMESTAMP('2020-01-01', '%Y-%m-%d')",
"doris": "UNIX_TIMESTAMP('2020-01-01', '%Y-%m-%d')",
},
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index bcabd20..d092d3b 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -309,6 +309,14 @@ class TestDuckDB(Validator):
"SELECT (c -> '$.k1') = 'v1'",
)
self.validate_identity(
+ "SELECT JSON_EXTRACT(c, '$[*].id')[0:2]",
+ "SELECT (c -> '$[*].id')[0 : 2]",
+ )
+ self.validate_identity(
+ "SELECT JSON_EXTRACT_STRING(c, '$[*].id')[0:2]",
+ "SELECT (c ->> '$[*].id')[0 : 2]",
+ )
+ self.validate_identity(
"""SELECT '{"foo": [1, 2, 3]}' -> 'foo' -> 0""",
"""SELECT '{"foo": [1, 2, 3]}' -> '$.foo' -> '$[0]'""",
)
@@ -1048,7 +1056,14 @@ class TestDuckDB(Validator):
"CAST([STRUCT_PACK(a := 1)] AS STRUCT(a BIGINT)[])",
"CAST([ROW(1)] AS STRUCT(a BIGINT)[])",
)
-
+ self.validate_identity(
+ "STRUCT_PACK(a := 'b')::json",
+ "CAST({'a': 'b'} AS JSON)",
+ )
+ self.validate_identity(
+ "STRUCT_PACK(a := 'b')::STRUCT(a TEXT)",
+ "CAST(ROW('b') AS STRUCT(a TEXT))",
+ )
self.validate_all(
"CAST(x AS VARCHAR(5))",
write={
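The DuckDB hunks cover two behaviors: JSON_EXTRACT/JSON_EXTRACT_STRING normalize to the -> and ->> operators with array slices preserved, and a STRUCT_PACK cast collapses to a struct literal for JSON but to ROW for a typed STRUCT. A minimal sketch, with expected outputs taken from the identities above (version-dependent):

    import sqlglot

    # JSON_EXTRACT sugar normalizes to the arrow operator; the slice survives.
    print(sqlglot.transpile(
        "SELECT JSON_EXTRACT(c, '$[*].id')[0:2]",
        read="duckdb",
        write="duckdb",
    )[0])
    # SELECT (c -> '$[*].id')[0 : 2]

    # A STRUCT_PACK cast to a typed STRUCT is rewritten through ROW.
    print(sqlglot.transpile(
        "STRUCT_PACK(a := 'b')::STRUCT(a TEXT)",
        read="duckdb",
        write="duckdb",
    )[0])
    # CAST(ROW('b') AS STRUCT(a TEXT))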
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py
index 0311336..65ea0e5 100644
--- a/tests/dialects/test_hive.py
+++ b/tests/dialects/test_hive.py
@@ -372,7 +372,7 @@ class TestHive(Validator):
"UNIX_TIMESTAMP(x)",
write={
"duckdb": "EPOCH(STRPTIME(x, '%Y-%m-%d %H:%M:%S'))",
- "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST(x AS VARCHAR), '%Y-%m-%d %T')), PARSE_DATETIME(CAST(x AS VARCHAR), 'yyyy-MM-dd HH:mm:ss')))",
+ "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST(x AS VARCHAR), '%Y-%m-%d %T')), PARSE_DATETIME(DATE_FORMAT(x, '%Y-%m-%d %T'), 'yyyy-MM-dd HH:mm:ss')))",
"hive": "UNIX_TIMESTAMP(x)",
"spark": "UNIX_TIMESTAMP(x)",
"": "STR_TO_UNIX(x, '%Y-%m-%d %H:%M:%S')",
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index bfdb2a6..65b4c4a 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -25,6 +25,9 @@ class TestMySQL(Validator):
self.validate_identity("ALTER TABLE t ADD UNIQUE `i` (`c`)")
self.validate_identity("ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT")
self.validate_identity(
+ "INSERT INTO things (a, b) VALUES (1, 2) AS new_data ON DUPLICATE KEY UPDATE id = LAST_INSERT_ID(id), a = new_data.a, b = new_data.b"
+ )
+ self.validate_identity(
"CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, UNIQUE `OAUTH_CONSUMER_KEY` (`key`))"
)
self.validate_identity(
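The new MySQL identity covers the row-alias upsert syntax (INSERT ... VALUES ... AS alias ON DUPLICATE KEY UPDATE, available since MySQL 8.0.19). The equivalent round-trip check as a sketch:

    import sqlglot

    # The row alias and the alias-qualified assignments must survive unchanged.
    sql = (
        "INSERT INTO things (a, b) VALUES (1, 2) AS new_data "
        "ON DUPLICATE KEY UPDATE id = LAST_INSERT_ID(id), a = new_data.a, b = new_data.b"
    )
    assert sqlglot.transpile(sql, read="mysql", write="mysql")[0] == sql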
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 16191c5..21c7e2b 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -17,9 +17,6 @@ class TestPostgres(Validator):
)
self.validate_identity("SHA384(x)")
- self.validate_identity(
- 'CREATE TABLE x (a TEXT COLLATE "de_DE")', "CREATE TABLE x (a TEXT COLLATE de_DE)"
- )
self.validate_identity("1.x", "1. AS x")
self.validate_identity("|/ x", "SQRT(x)")
self.validate_identity("||/ x", "CBRT(x)")
@@ -565,24 +562,33 @@ class TestPostgres(Validator):
"postgres": "GENERATE_SERIES(CAST('2019-01-01' AS TIMESTAMP), CURRENT_TIMESTAMP, INTERVAL '1 DAY')",
"presto": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' DAY)",
"trino": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' DAY)",
+ "hive": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
+ "spark2": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
+ "spark": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
+ "databricks": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
},
)
self.validate_all(
"GENERATE_SERIES(a, b)",
- write={
+ read={
"postgres": "GENERATE_SERIES(a, b)",
"presto": "SEQUENCE(a, b)",
"trino": "SEQUENCE(a, b)",
"tsql": "GENERATE_SERIES(a, b)",
+ "hive": "SEQUENCE(a, b)",
+ "spark2": "SEQUENCE(a, b)",
+ "spark": "SEQUENCE(a, b)",
+ "databricks": "SEQUENCE(a, b)",
},
- )
- self.validate_all(
- "GENERATE_SERIES(a, b)",
- read={
+ write={
"postgres": "GENERATE_SERIES(a, b)",
"presto": "SEQUENCE(a, b)",
"trino": "SEQUENCE(a, b)",
"tsql": "GENERATE_SERIES(a, b)",
+ "hive": "SEQUENCE(a, b)",
+ "spark2": "SEQUENCE(a, b)",
+ "spark": "SEQUENCE(a, b)",
+ "databricks": "SEQUENCE(a, b)",
},
)
self.validate_all(
@@ -759,6 +765,14 @@ class TestPostgres(Validator):
},
)
+ self.validate_all(
+ "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')",
+ write={
+ "duckdb": "SELECT CAST(STRPTIME('01/01/2000', '%m/%d/%Y') AS DATE)",
+ "postgres": "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')",
+ },
+ )
+
def test_ddl(self):
# Checks that user-defined types are parsed into DataType instead of Identifier
self.parse_one("CREATE TABLE t (a udt)").this.expressions[0].args["kind"].assert_is(
@@ -775,6 +789,8 @@ class TestPostgres(Validator):
cdef.args["kind"].assert_is(exp.DataType)
self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL DAY)")
+ self.validate_identity('CREATE TABLE x (a TEXT COLLATE "de_DE")')
+ self.validate_identity('CREATE TABLE x (a TEXT COLLATE pg_catalog."default")')
self.validate_identity("CREATE TABLE t (col INT[3][5])")
self.validate_identity("CREATE TABLE t (col INT[3])")
self.validate_identity("CREATE INDEX IF NOT EXISTS ON t(c)")
@@ -981,6 +997,34 @@ class TestPostgres(Validator):
self.validate_identity("CREATE TABLE tbl (col UUID UNIQUE DEFAULT GEN_RANDOM_UUID())")
self.validate_identity("CREATE TABLE tbl (col UUID, UNIQUE NULLS NOT DISTINCT (col))")
+ self.validate_identity("CREATE INDEX CONCURRENTLY ix_table_id ON tbl USING btree(id)")
+ self.validate_identity(
+ "CREATE INDEX CONCURRENTLY IF NOT EXISTS ix_table_id ON tbl USING btree(id)"
+ )
+
+ self.validate_identity(
+ """
+ CREATE TABLE IF NOT EXISTS public.rental
+ (
+ inventory_id INT NOT NULL,
+ CONSTRAINT rental_customer_id_fkey FOREIGN KEY (customer_id)
+ REFERENCES public.customer (customer_id) MATCH FULL
+ ON UPDATE CASCADE
+ ON DELETE RESTRICT,
+ CONSTRAINT rental_inventory_id_fkey FOREIGN KEY (inventory_id)
+ REFERENCES public.inventory (inventory_id) MATCH PARTIAL
+ ON UPDATE CASCADE
+ ON DELETE RESTRICT,
+ CONSTRAINT rental_staff_id_fkey FOREIGN KEY (staff_id)
+ REFERENCES public.staff (staff_id) MATCH SIMPLE
+ ON UPDATE CASCADE
+ ON DELETE RESTRICT,
+ INITIALLY IMMEDIATE
+ )
+ """,
+ "CREATE TABLE IF NOT EXISTS public.rental (inventory_id INT NOT NULL, CONSTRAINT rental_customer_id_fkey FOREIGN KEY (customer_id) REFERENCES public.customer (customer_id) MATCH FULL ON UPDATE CASCADE ON DELETE RESTRICT, CONSTRAINT rental_inventory_id_fkey FOREIGN KEY (inventory_id) REFERENCES public.inventory (inventory_id) MATCH PARTIAL ON UPDATE CASCADE ON DELETE RESTRICT, CONSTRAINT rental_staff_id_fkey FOREIGN KEY (staff_id) REFERENCES public.staff (staff_id) MATCH SIMPLE ON UPDATE CASCADE ON DELETE RESTRICT, INITIALLY IMMEDIATE)",
+ )
+
with self.assertRaises(ParseError):
transpile("CREATE TABLE products (price DECIMAL CHECK price > 0)", read="postgres")
with self.assertRaises(ParseError):
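The Postgres changes map GENERATE_SERIES to SEQUENCE for the Hive/Spark family in both directions (the read= block parses each dialect's SQL, the write= block generates it), add a DuckDB rendering for TO_DATE with a format string, and extend DDL coverage (COLLATE with quoting preserved, CREATE INDEX CONCURRENTLY, foreign-key MATCH clauses). A minimal sketch of the function mappings; expected strings come from the assertions above and may vary by sqlglot version:

    import sqlglot

    # Both directions of the mapping: Postgres -> Spark and back.
    print(sqlglot.transpile("GENERATE_SERIES(a, b)", read="postgres", write="spark")[0])
    # SEQUENCE(a, b)
    print(sqlglot.transpile("SEQUENCE(a, b)", read="spark", write="postgres")[0])
    # GENERATE_SERIES(a, b)

    # TO_DATE with a format string maps to STRPTIME in DuckDB.
    print(sqlglot.transpile(
        "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')",
        read="postgres",
        write="duckdb",
    )[0])
    # SELECT CAST(STRPTIME('01/01/2000', '%m/%d/%Y') AS DATE)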
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index a56d4a8..f89b8e8 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -413,6 +413,19 @@ class TestPresto(Validator):
},
)
+ self.validate_identity("DATE_ADD('DAY', FLOOR(5), y)")
+ self.validate_identity(
+ """SELECT DATE_ADD('DAY', MOD(5, 2.5), y), DATE_ADD('DAY', CEIL(5.5), y)""",
+ """SELECT DATE_ADD('DAY', CAST(5 % 2.5 AS BIGINT), y), DATE_ADD('DAY', CAST(CEIL(5.5) AS BIGINT), y)""",
+ )
+
+ self.validate_all(
+ "DATE_ADD('MINUTE', CAST(FLOOR(CAST(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP) AS DOUBLE) / NULLIF(30, 0)) * 30 AS BIGINT), col)",
+ read={
+ "spark": "TIMESTAMPADD(MINUTE, FLOOR(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP)/30)*30, col)",
+ },
+ )
+
def test_ddl(self):
self.validate_all(
"CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1",
@@ -942,8 +955,8 @@ class TestPresto(Validator):
write={
"bigquery": "SELECT * FROM UNNEST(['7', '14'])",
"presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS x",
- "hive": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x",
- "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x",
+ "hive": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x",
+ "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x",
},
)
self.validate_all(
@@ -951,8 +964,8 @@ class TestPresto(Validator):
write={
"bigquery": "SELECT * FROM UNNEST(['7', '14']) AS y",
"presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS x(y)",
- "hive": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x(y)",
- "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x(y)",
+ "hive": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x(y)",
+ "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x(y)",
},
)
self.validate_all(
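Presto's DATE_ADD requires an integral amount, so non-integral arithmetic (MOD over a decimal, CEIL) now gets an explicit CAST to BIGINT, while FLOOR round-trips unchanged per the first identity above. A sketch using the two-argument identity (input, then expected normalized output):

    import sqlglot

    print(sqlglot.transpile(
        "SELECT DATE_ADD('DAY', MOD(5, 2.5), y), DATE_ADD('DAY', CEIL(5.5), y)",
        read="presto",
        write="presto",
    )[0])
    # SELECT DATE_ADD('DAY', CAST(5 % 2.5 AS BIGINT), y),
    #     DATE_ADD('DAY', CAST(CEIL(5.5) AS BIGINT), y)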
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 88b2148..3686de5 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -11,6 +11,10 @@ class TestSnowflake(Validator):
dialect = "snowflake"
def test_snowflake(self):
+ self.validate_identity(
+ "SELECT * FROM table AT (TIMESTAMP => '2024-07-24') UNPIVOT(a FOR b IN (c)) AS pivot_table"
+ )
+
self.assertEqual(
# Ensures we don't fail when generating ParseJSON with the `safe` arg set to `True`
self.validate_identity("""SELECT TRY_PARSE_JSON('{"x: 1}')""").sql(),
@@ -827,6 +831,22 @@ WHERE
},
)
+ self.validate_all(
+ "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT('key5', 'value5'), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
+ write={
+ "snowflake": "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT('key5', 'value5'), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
+ "duckdb": "SELECT STRUCT_INSERT(STRUCT_INSERT(STRUCT_INSERT({'key5': 'value5'}, key1 := 5), key2 := 2.2), key3 := 'value3')",
+ },
+ )
+
+ self.validate_all(
+ "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT(), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
+ write={
+ "snowflake": "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT(), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
+ "duckdb": "SELECT STRUCT_INSERT(STRUCT_INSERT(STRUCT_PACK(key1 := 5), key2 := 2.2), key3 := 'value3')",
+ },
+ )
+
def test_null_treatment(self):
self.validate_all(
r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
@@ -899,6 +919,11 @@ WHERE
"SELECT * FROM @foo/bar (FILE_FORMAT => ds_sandbox.test.my_csv_format, PATTERN => 'test') AS bla",
)
+ self.validate_identity(
+ "SELECT * FROM @test.public.thing/location/somefile.csv( FILE_FORMAT => 'fmt' )",
+ "SELECT * FROM @test.public.thing/location/somefile.csv (FILE_FORMAT => 'fmt')",
+ )
+
def test_sample(self):
self.validate_identity("SELECT * FROM testtable TABLESAMPLE BERNOULLI (20.3)")
self.validate_identity("SELECT * FROM testtable TABLESAMPLE SYSTEM (3) SEED (82)")
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 11d60e7..9658a02 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -391,6 +391,17 @@ class TestTSQL(Validator):
self.validate_identity("HASHBYTES('MD2', 'x')")
self.validate_identity("LOG(n, b)")
+ self.validate_all(
+ "STDEV(x)",
+ read={
+ "": "STDDEV(x)",
+ },
+ write={
+ "": "STDDEV(x)",
+ "tsql": "STDEV(x)",
+ },
+ )
+
def test_option(self):
possible_options = [
"HASH GROUP",
@@ -888,6 +899,14 @@ class TestTSQL(Validator):
},
)
+ for colstore in ("NONCLUSTERED COLUMNSTORE", "CLUSTERED COLUMNSTORE"):
+ self.validate_identity(f"CREATE {colstore} INDEX index_name ON foo.bar")
+
+ self.validate_identity(
+ "CREATE COLUMNSTORE INDEX index_name ON foo.bar",
+ "CREATE NONCLUSTERED COLUMNSTORE INDEX index_name ON foo.bar",
+ )
+
def test_insert_cte(self):
self.validate_all(
"INSERT INTO foo.bar WITH cte AS (SELECT 1 AS one) SELECT * FROM cte",