Diffstat (limited to 'tests/dialects/test_dialect.py')
-rw-r--r--  tests/dialects/test_dialect.py | 133
1 file changed, 115 insertions(+), 18 deletions(-)
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 3993565..6b7bfd3 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -36,9 +36,7 @@ class Validator(unittest.TestCase):
for read_dialect, read_sql in (read or {}).items():
with self.subTest(f"{read_dialect} -> {sql}"):
self.assertEqual(
- parse_one(read_sql, read_dialect).sql(
- self.dialect, unsupported_level=ErrorLevel.IGNORE
- ),
+ parse_one(read_sql, read_dialect).sql(self.dialect, unsupported_level=ErrorLevel.IGNORE),
sql,
)
@@ -46,9 +44,7 @@ class Validator(unittest.TestCase):
with self.subTest(f"{sql} -> {write_dialect}"):
if write_sql is UnsupportedError:
with self.assertRaises(UnsupportedError):
- expression.sql(
- write_dialect, unsupported_level=ErrorLevel.RAISE
- )
+ expression.sql(write_dialect, unsupported_level=ErrorLevel.RAISE)
else:
self.assertEqual(
expression.sql(
@@ -82,12 +78,20 @@ class TestDialect(Validator):
"oracle": "CAST(a AS CLOB)",
"postgres": "CAST(a AS TEXT)",
"presto": "CAST(a AS VARCHAR)",
+ "redshift": "CAST(a AS TEXT)",
"snowflake": "CAST(a AS TEXT)",
"spark": "CAST(a AS STRING)",
"starrocks": "CAST(a AS STRING)",
},
)
self.validate_all(
+ "CAST(a AS DATETIME)",
+ write={
+ "postgres": "CAST(a AS TIMESTAMP)",
+ "sqlite": "CAST(a AS DATETIME)",
+ },
+ )
+ self.validate_all(
"CAST(a AS STRING)",
write={
"bigquery": "CAST(a AS STRING)",
@@ -97,6 +101,7 @@ class TestDialect(Validator):
"oracle": "CAST(a AS CLOB)",
"postgres": "CAST(a AS TEXT)",
"presto": "CAST(a AS VARCHAR)",
+ "redshift": "CAST(a AS TEXT)",
"snowflake": "CAST(a AS TEXT)",
"spark": "CAST(a AS STRING)",
"starrocks": "CAST(a AS STRING)",
@@ -112,6 +117,7 @@ class TestDialect(Validator):
"oracle": "CAST(a AS VARCHAR2)",
"postgres": "CAST(a AS VARCHAR)",
"presto": "CAST(a AS VARCHAR)",
+ "redshift": "CAST(a AS VARCHAR)",
"snowflake": "CAST(a AS VARCHAR)",
"spark": "CAST(a AS STRING)",
"starrocks": "CAST(a AS VARCHAR)",
@@ -127,6 +133,7 @@ class TestDialect(Validator):
"oracle": "CAST(a AS VARCHAR2(3))",
"postgres": "CAST(a AS VARCHAR(3))",
"presto": "CAST(a AS VARCHAR(3))",
+ "redshift": "CAST(a AS VARCHAR(3))",
"snowflake": "CAST(a AS VARCHAR(3))",
"spark": "CAST(a AS VARCHAR(3))",
"starrocks": "CAST(a AS VARCHAR(3))",
@@ -142,6 +149,7 @@ class TestDialect(Validator):
"oracle": "CAST(a AS NUMBER)",
"postgres": "CAST(a AS SMALLINT)",
"presto": "CAST(a AS SMALLINT)",
+ "redshift": "CAST(a AS SMALLINT)",
"snowflake": "CAST(a AS SMALLINT)",
"spark": "CAST(a AS SHORT)",
"sqlite": "CAST(a AS INTEGER)",
@@ -149,6 +157,19 @@ class TestDialect(Validator):
},
)
self.validate_all(
+ "TRY_CAST(a AS DOUBLE)",
+ read={
+ "postgres": "CAST(a AS DOUBLE PRECISION)",
+ "redshift": "CAST(a AS DOUBLE PRECISION)",
+ },
+ write={
+ "duckdb": "TRY_CAST(a AS DOUBLE)",
+ "postgres": "CAST(a AS DOUBLE PRECISION)",
+ "redshift": "CAST(a AS DOUBLE PRECISION)",
+ },
+ )
+
+ self.validate_all(
"CAST(a AS DOUBLE)",
write={
"bigquery": "CAST(a AS FLOAT64)",
@@ -159,16 +180,32 @@ class TestDialect(Validator):
"oracle": "CAST(a AS DOUBLE PRECISION)",
"postgres": "CAST(a AS DOUBLE PRECISION)",
"presto": "CAST(a AS DOUBLE)",
+ "redshift": "CAST(a AS DOUBLE PRECISION)",
"snowflake": "CAST(a AS DOUBLE)",
"spark": "CAST(a AS DOUBLE)",
"starrocks": "CAST(a AS DOUBLE)",
},
)
self.validate_all(
- "CAST(a AS TIMESTAMP)", write={"starrocks": "CAST(a AS DATETIME)"}
+ "CAST('1 DAY' AS INTERVAL)",
+ write={
+ "postgres": "CAST('1 DAY' AS INTERVAL)",
+ "redshift": "CAST('1 DAY' AS INTERVAL)",
+ },
)
self.validate_all(
- "CAST(a AS TIMESTAMPTZ)", write={"starrocks": "CAST(a AS DATETIME)"}
+ "CAST(a AS TIMESTAMP)",
+ write={
+ "starrocks": "CAST(a AS DATETIME)",
+ "redshift": "CAST(a AS TIMESTAMP)",
+ },
+ )
+ self.validate_all(
+ "CAST(a AS TIMESTAMPTZ)",
+ write={
+ "starrocks": "CAST(a AS DATETIME)",
+ "redshift": "CAST(a AS TIMESTAMPTZ)",
+ },
)
self.validate_all("CAST(a AS TINYINT)", write={"oracle": "CAST(a AS NUMBER)"})
self.validate_all("CAST(a AS SMALLINT)", write={"oracle": "CAST(a AS NUMBER)"})
@@ -552,6 +589,7 @@ class TestDialect(Validator):
write={
"bigquery": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
"duckdb": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
+ "oracle": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
"presto": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname, lname NULLS FIRST",
"hive": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
"spark": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
@@ -566,6 +604,7 @@ class TestDialect(Validator):
"presto": "JSON_EXTRACT(x, 'y')",
},
write={
+ "oracle": "JSON_EXTRACT(x, 'y')",
"postgres": "x->'y'",
"presto": "JSON_EXTRACT(x, 'y')",
},
@@ -623,6 +662,37 @@ class TestDialect(Validator):
},
)
+ # https://dev.mysql.com/doc/refman/8.0/en/join.html
+ # https://www.postgresql.org/docs/current/queries-table-expressions.html
+ def test_joined_tables(self):
+ self.validate_identity("SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)")
+ self.validate_identity("SELECT * FROM (tbl1 JOIN tbl2 JOIN tbl3)")
+ self.validate_identity("SELECT * FROM (tbl1 JOIN (tbl2 JOIN tbl3) ON bla = foo)")
+ self.validate_identity("SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)")
+
+ self.validate_all(
+ "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)",
+ write={
+ "postgres": "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)",
+ "mysql": "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)",
+ },
+ )
+ self.validate_all(
+ "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)",
+ write={
+ "postgres": "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)",
+ "mysql": "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)",
+ },
+ )
+
+ def test_lateral_subquery(self):
+ self.validate_identity(
+ "SELECT art FROM tbl1 INNER JOIN LATERAL (SELECT art FROM tbl2) AS tbl2 ON tbl1.art = tbl2.art"
+ )
+ self.validate_identity(
+ "SELECT * FROM tbl AS t LEFT JOIN LATERAL (SELECT * FROM b WHERE b.t_id = t.t_id) AS t ON TRUE"
+ )
+
def test_set_operators(self):
self.validate_all(
"SELECT * FROM a UNION SELECT * FROM b",
@@ -731,6 +801,9 @@ class TestDialect(Validator):
)
def test_operators(self):
+ self.validate_identity("some.column LIKE 'foo' || another.column || 'bar' || LOWER(x)")
+ self.validate_identity("some.column LIKE 'foo' + another.column + 'bar'")
+
self.validate_all(
"x ILIKE '%y'",
read={
@@ -874,16 +947,8 @@ class TestDialect(Validator):
"spark": "FILTER(the_array, x -> x > 0)",
},
)
- self.validate_all(
- "SELECT a AS b FROM x GROUP BY b",
- write={
- "duckdb": "SELECT a AS b FROM x GROUP BY b",
- "presto": "SELECT a AS b FROM x GROUP BY 1",
- "hive": "SELECT a AS b FROM x GROUP BY 1",
- "oracle": "SELECT a AS b FROM x GROUP BY 1",
- "spark": "SELECT a AS b FROM x GROUP BY 1",
- },
- )
+
+ def test_limit(self):
self.validate_all(
"SELECT x FROM y LIMIT 10",
write={
@@ -915,6 +980,7 @@ class TestDialect(Validator):
read={
"clickhouse": '`x` + "y"',
"sqlite": '`x` + "y"',
+ "redshift": '"x" + "y"',
},
)
self.validate_all(
@@ -977,5 +1043,36 @@ class TestDialect(Validator):
"oracle": "CREATE TABLE t (b1 BLOB, b2 BLOB(1024), c1 CLOB, c2 CLOB(1024))",
"postgres": "CREATE TABLE t (b1 BYTEA, b2 BYTEA(1024), c1 TEXT, c2 TEXT(1024))",
"sqlite": "CREATE TABLE t (b1 BLOB, b2 BLOB(1024), c1 TEXT, c2 TEXT(1024))",
+ "redshift": "CREATE TABLE t (b1 VARBYTE, b2 VARBYTE(1024), c1 TEXT, c2 TEXT(1024))",
+ },
+ )
+
+ def test_alias(self):
+ self.validate_all(
+ "SELECT a AS b FROM x GROUP BY b",
+ write={
+ "duckdb": "SELECT a AS b FROM x GROUP BY b",
+ "presto": "SELECT a AS b FROM x GROUP BY 1",
+ "hive": "SELECT a AS b FROM x GROUP BY 1",
+ "oracle": "SELECT a AS b FROM x GROUP BY 1",
+ "spark": "SELECT a AS b FROM x GROUP BY 1",
+ },
+ )
+ self.validate_all(
+ "SELECT y x FROM my_table t",
+ write={
+ "hive": "SELECT y AS x FROM my_table AS t",
+ "oracle": "SELECT y AS x FROM my_table t",
+ "postgres": "SELECT y AS x FROM my_table AS t",
+ "sqlite": "SELECT y AS x FROM my_table AS t",
+ },
+ )
+ self.validate_all(
+ "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c",
+ write={
+ "hive": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c",
+ "oracle": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 t JOIN cte2 WHERE cte1.a = cte2.c",
+ "postgres": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c",
+ "sqlite": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c",
},
)
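
For context, a minimal sketch of the round trip these tests assert, assuming sqlglot is installed and using only the calls already visible in this diff (`parse_one` and `Expression.sql` with `unsupported_level`); the expected strings are copied from the `CAST(a AS DATETIME)` entry added above, so this mirrors what `Validator.validate_all` does with its `write` mapping rather than defining any new behavior.

```python
from sqlglot import ErrorLevel, parse_one

# Parse the generic-dialect SQL once, then re-render it per target dialect,
# ignoring unsupported-feature warnings, as the Validator helper does above.
expression = parse_one("CAST(a AS DATETIME)")

assert (
    expression.sql("postgres", unsupported_level=ErrorLevel.IGNORE)
    == "CAST(a AS TIMESTAMP)"
)
assert (
    expression.sql("sqlite", unsupported_level=ErrorLevel.IGNORE)
    == "CAST(a AS DATETIME)"
)
```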