From d6dd6db5060d5d7a4abbc132cb1240973e6f9487 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 22 Sep 2022 06:30:57 +0200
Subject: Adding upstream version 6.1.1.

Signed-off-by: Daniel Baumann
---
 tests/dialects/test_dialect.py   | 133 +++++++++++++++++++++++++++++++++------
 tests/dialects/test_hive.py      |  15 +++++
 tests/dialects/test_mysql.py     |  52 +++++++++++++--
 tests/dialects/test_postgres.py  |  93 +++++++++++++++++++++++----
 tests/dialects/test_redshift.py  |  64 +++++++++++++++++++
 tests/dialects/test_snowflake.py |  32 ++++++++++
 tests/dialects/test_sqlite.py    |  18 ++++++
 7 files changed, 373 insertions(+), 34 deletions(-)
 create mode 100644 tests/dialects/test_redshift.py

diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 3993565..6b7bfd3 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -36,9 +36,7 @@ class Validator(unittest.TestCase):
         for read_dialect, read_sql in (read or {}).items():
             with self.subTest(f"{read_dialect} -> {sql}"):
                 self.assertEqual(
-                    parse_one(read_sql, read_dialect).sql(
-                        self.dialect, unsupported_level=ErrorLevel.IGNORE
-                    ),
+                    parse_one(read_sql, read_dialect).sql(self.dialect, unsupported_level=ErrorLevel.IGNORE),
                     sql,
                 )

@@ -46,9 +44,7 @@ class Validator(unittest.TestCase):
             with self.subTest(f"{sql} -> {write_dialect}"):
                 if write_sql is UnsupportedError:
                     with self.assertRaises(UnsupportedError):
-                        expression.sql(
-                            write_dialect, unsupported_level=ErrorLevel.RAISE
-                        )
+                        expression.sql(write_dialect, unsupported_level=ErrorLevel.RAISE)
                 else:
                     self.assertEqual(
                         expression.sql(
@@ -82,11 +78,19 @@ class TestDialect(Validator):
                 "oracle": "CAST(a AS CLOB)",
                 "postgres": "CAST(a AS TEXT)",
                 "presto": "CAST(a AS VARCHAR)",
+                "redshift": "CAST(a AS TEXT)",
                 "snowflake": "CAST(a AS TEXT)",
                 "spark": "CAST(a AS STRING)",
                 "starrocks": "CAST(a AS STRING)",
             },
         )
+        self.validate_all(
+            "CAST(a AS DATETIME)",
+            write={
+                "postgres": "CAST(a AS TIMESTAMP)",
+                "sqlite": "CAST(a AS DATETIME)",
+            },
+        )
         self.validate_all(
             "CAST(a AS STRING)",
             write={
@@ -97,6 +101,7 @@ class TestDialect(Validator):
                 "oracle": "CAST(a AS CLOB)",
                 "postgres": "CAST(a AS TEXT)",
                 "presto": "CAST(a AS VARCHAR)",
+                "redshift": "CAST(a AS TEXT)",
                 "snowflake": "CAST(a AS TEXT)",
                 "spark": "CAST(a AS STRING)",
                 "starrocks": "CAST(a AS STRING)",
@@ -112,6 +117,7 @@ class TestDialect(Validator):
                 "oracle": "CAST(a AS VARCHAR2)",
                 "postgres": "CAST(a AS VARCHAR)",
                 "presto": "CAST(a AS VARCHAR)",
+                "redshift": "CAST(a AS VARCHAR)",
                 "snowflake": "CAST(a AS VARCHAR)",
                 "spark": "CAST(a AS STRING)",
                 "starrocks": "CAST(a AS VARCHAR)",
@@ -127,6 +133,7 @@ class TestDialect(Validator):
                 "oracle": "CAST(a AS VARCHAR2(3))",
                 "postgres": "CAST(a AS VARCHAR(3))",
                 "presto": "CAST(a AS VARCHAR(3))",
+                "redshift": "CAST(a AS VARCHAR(3))",
                 "snowflake": "CAST(a AS VARCHAR(3))",
                 "spark": "CAST(a AS VARCHAR(3))",
                 "starrocks": "CAST(a AS VARCHAR(3))",
@@ -142,12 +149,26 @@ class TestDialect(Validator):
                 "oracle": "CAST(a AS NUMBER)",
                 "postgres": "CAST(a AS SMALLINT)",
                 "presto": "CAST(a AS SMALLINT)",
+                "redshift": "CAST(a AS SMALLINT)",
                 "snowflake": "CAST(a AS SMALLINT)",
                 "spark": "CAST(a AS SHORT)",
                 "sqlite": "CAST(a AS INTEGER)",
                 "starrocks": "CAST(a AS SMALLINT)",
             },
         )
+        self.validate_all(
+            "TRY_CAST(a AS DOUBLE)",
+            read={
+                "postgres": "CAST(a AS DOUBLE PRECISION)",
+                "redshift": "CAST(a AS DOUBLE PRECISION)",
+            },
+            write={
+                "duckdb": "TRY_CAST(a AS DOUBLE)",
+                "postgres": "CAST(a AS DOUBLE PRECISION)",
+                "redshift": "CAST(a AS DOUBLE PRECISION)",
PRECISION)", + }, + ) + self.validate_all( "CAST(a AS DOUBLE)", write={ @@ -159,16 +180,32 @@ class TestDialect(Validator): "oracle": "CAST(a AS DOUBLE PRECISION)", "postgres": "CAST(a AS DOUBLE PRECISION)", "presto": "CAST(a AS DOUBLE)", + "redshift": "CAST(a AS DOUBLE PRECISION)", "snowflake": "CAST(a AS DOUBLE)", "spark": "CAST(a AS DOUBLE)", "starrocks": "CAST(a AS DOUBLE)", }, ) self.validate_all( - "CAST(a AS TIMESTAMP)", write={"starrocks": "CAST(a AS DATETIME)"} + "CAST('1 DAY' AS INTERVAL)", + write={ + "postgres": "CAST('1 DAY' AS INTERVAL)", + "redshift": "CAST('1 DAY' AS INTERVAL)", + }, ) self.validate_all( - "CAST(a AS TIMESTAMPTZ)", write={"starrocks": "CAST(a AS DATETIME)"} + "CAST(a AS TIMESTAMP)", + write={ + "starrocks": "CAST(a AS DATETIME)", + "redshift": "CAST(a AS TIMESTAMP)", + }, + ) + self.validate_all( + "CAST(a AS TIMESTAMPTZ)", + write={ + "starrocks": "CAST(a AS DATETIME)", + "redshift": "CAST(a AS TIMESTAMPTZ)", + }, ) self.validate_all("CAST(a AS TINYINT)", write={"oracle": "CAST(a AS NUMBER)"}) self.validate_all("CAST(a AS SMALLINT)", write={"oracle": "CAST(a AS NUMBER)"}) @@ -552,6 +589,7 @@ class TestDialect(Validator): write={ "bigquery": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname", "duckdb": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname", + "oracle": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname", "presto": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname, lname NULLS FIRST", "hive": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname", "spark": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname", @@ -566,6 +604,7 @@ class TestDialect(Validator): "presto": "JSON_EXTRACT(x, 'y')", }, write={ + "oracle": "JSON_EXTRACT(x, 'y')", "postgres": "x->'y'", "presto": "JSON_EXTRACT(x, 'y')", }, @@ -623,6 +662,37 @@ class TestDialect(Validator): }, ) + # https://dev.mysql.com/doc/refman/8.0/en/join.html + # https://www.postgresql.org/docs/current/queries-table-expressions.html + def test_joined_tables(self): + self.validate_identity("SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)") + self.validate_identity("SELECT * FROM (tbl1 JOIN tbl2 JOIN tbl3)") + self.validate_identity("SELECT * FROM (tbl1 JOIN (tbl2 JOIN tbl3) ON bla = foo)") + self.validate_identity("SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)") + + self.validate_all( + "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)", + write={ + "postgres": "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)", + "mysql": "SELECT * FROM (tbl1 LEFT JOIN tbl2 ON 1 = 1)", + }, + ) + self.validate_all( + "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)", + write={ + "postgres": "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)", + "mysql": "SELECT * FROM (tbl1 JOIN LATERAL (SELECT * FROM bla) AS tbl)", + }, + ) + + def test_lateral_subquery(self): + self.validate_identity( + "SELECT art FROM tbl1 INNER JOIN LATERAL (SELECT art FROM tbl2) AS tbl2 ON tbl1.art = tbl2.art" + ) + self.validate_identity( + "SELECT * FROM tbl AS t LEFT JOIN LATERAL (SELECT * FROM b WHERE b.t_id = t.t_id) AS t ON TRUE" + ) + def test_set_operators(self): self.validate_all( "SELECT * FROM a UNION SELECT * FROM b", @@ -731,6 +801,9 @@ class TestDialect(Validator): ) def test_operators(self): + self.validate_identity("some.column LIKE 'foo' || another.column || 'bar' || 
LOWER(x)") + self.validate_identity("some.column LIKE 'foo' + another.column + 'bar'") + self.validate_all( "x ILIKE '%y'", read={ @@ -874,16 +947,8 @@ class TestDialect(Validator): "spark": "FILTER(the_array, x -> x > 0)", }, ) - self.validate_all( - "SELECT a AS b FROM x GROUP BY b", - write={ - "duckdb": "SELECT a AS b FROM x GROUP BY b", - "presto": "SELECT a AS b FROM x GROUP BY 1", - "hive": "SELECT a AS b FROM x GROUP BY 1", - "oracle": "SELECT a AS b FROM x GROUP BY 1", - "spark": "SELECT a AS b FROM x GROUP BY 1", - }, - ) + + def test_limit(self): self.validate_all( "SELECT x FROM y LIMIT 10", write={ @@ -915,6 +980,7 @@ class TestDialect(Validator): read={ "clickhouse": '`x` + "y"', "sqlite": '`x` + "y"', + "redshift": '"x" + "y"', }, ) self.validate_all( @@ -977,5 +1043,36 @@ class TestDialect(Validator): "oracle": "CREATE TABLE t (b1 BLOB, b2 BLOB(1024), c1 CLOB, c2 CLOB(1024))", "postgres": "CREATE TABLE t (b1 BYTEA, b2 BYTEA(1024), c1 TEXT, c2 TEXT(1024))", "sqlite": "CREATE TABLE t (b1 BLOB, b2 BLOB(1024), c1 TEXT, c2 TEXT(1024))", + "redshift": "CREATE TABLE t (b1 VARBYTE, b2 VARBYTE(1024), c1 TEXT, c2 TEXT(1024))", + }, + ) + + def test_alias(self): + self.validate_all( + "SELECT a AS b FROM x GROUP BY b", + write={ + "duckdb": "SELECT a AS b FROM x GROUP BY b", + "presto": "SELECT a AS b FROM x GROUP BY 1", + "hive": "SELECT a AS b FROM x GROUP BY 1", + "oracle": "SELECT a AS b FROM x GROUP BY 1", + "spark": "SELECT a AS b FROM x GROUP BY 1", + }, + ) + self.validate_all( + "SELECT y x FROM my_table t", + write={ + "hive": "SELECT y AS x FROM my_table AS t", + "oracle": "SELECT y AS x FROM my_table t", + "postgres": "SELECT y AS x FROM my_table AS t", + "sqlite": "SELECT y AS x FROM my_table AS t", + }, + ) + self.validate_all( + "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c", + write={ + "hive": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c", + "oracle": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 t JOIN cte2 WHERE cte1.a = cte2.c", + "postgres": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c", + "sqlite": "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c", }, ) diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py index eccd75a..55086e3 100644 --- a/tests/dialects/test_hive.py +++ b/tests/dialects/test_hive.py @@ -341,6 +341,21 @@ class TestHive(Validator): "spark": "PERCENTILE(x, 0.5)", }, ) + self.validate_all( + "PERCENTILE_APPROX(x, 0.5)", + read={ + "hive": "PERCENTILE_APPROX(x, 0.5)", + "presto": "APPROX_PERCENTILE(x, 0.5)", + "duckdb": "APPROX_QUANTILE(x, 0.5)", + "spark": "PERCENTILE_APPROX(x, 0.5)", + }, + write={ + "hive": "PERCENTILE_APPROX(x, 0.5)", + "presto": "APPROX_PERCENTILE(x, 0.5)", + "duckdb": "APPROX_QUANTILE(x, 0.5)", + "spark": "PERCENTILE_APPROX(x, 0.5)", + }, + ) self.validate_all( "APPROX_COUNT_DISTINCT(a)", write={ diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py index ee0c5f5..87a3d64 100644 --- a/tests/dialects/test_mysql.py +++ b/tests/dialects/test_mysql.py @@ -15,6 +15,10 @@ class TestMySQL(Validator): def test_identity(self): 
self.validate_identity("SELECT CAST(`a`.`b` AS INT) FROM foo") + self.validate_identity("SELECT TRIM(LEADING 'bla' FROM ' XXX ')") + self.validate_identity("SELECT TRIM(TRAILING 'bla' FROM ' XXX ')") + self.validate_identity("SELECT TRIM(BOTH 'bla' FROM ' XXX ')") + self.validate_identity("SELECT TRIM('bla' FROM ' XXX ')") def test_introducers(self): self.validate_all( @@ -27,12 +31,22 @@ class TestMySQL(Validator): }, ) - def test_binary_literal(self): + def test_hexadecimal_literal(self): self.validate_all( "SELECT 0xCC", write={ - "mysql": "SELECT b'11001100'", - "spark": "SELECT X'11001100'", + "mysql": "SELECT x'CC'", + "sqlite": "SELECT x'CC'", + "spark": "SELECT X'CC'", + "trino": "SELECT X'CC'", + "bigquery": "SELECT 0xCC", + "oracle": "SELECT 204", + }, + ) + self.validate_all( + "SELECT X'1A'", + write={ + "mysql": "SELECT x'1A'", }, ) self.validate_all( @@ -41,10 +55,22 @@ class TestMySQL(Validator): "mysql": "SELECT `0xz`", }, ) + + def test_bits_literal(self): + self.validate_all( + "SELECT 0b1011", + write={ + "mysql": "SELECT b'1011'", + "postgres": "SELECT b'1011'", + "oracle": "SELECT 11", + }, + ) self.validate_all( - "SELECT 0XCC", + "SELECT B'1011'", write={ - "mysql": "SELECT 0 AS XCC", + "mysql": "SELECT b'1011'", + "postgres": "SELECT b'1011'", + "oracle": "SELECT 11", }, ) @@ -77,3 +103,19 @@ class TestMySQL(Validator): "mysql": "SELECT 1", }, ) + + def test_mysql(self): + self.validate_all( + "GROUP_CONCAT(DISTINCT x ORDER BY y DESC)", + write={ + "mysql": "GROUP_CONCAT(DISTINCT x ORDER BY y DESC SEPARATOR ',')", + "sqlite": "GROUP_CONCAT(DISTINCT x ORDER BY y DESC)", + }, + ) + self.validate_all( + "GROUP_CONCAT(DISTINCT x ORDER BY y DESC SEPARATOR '')", + write={ + "mysql": "GROUP_CONCAT(DISTINCT x ORDER BY y DESC SEPARATOR '')", + "sqlite": "GROUP_CONCAT(DISTINCT x ORDER BY y DESC, '')", + }, + ) diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py index 15dbfd0..e0934d7 100644 --- a/tests/dialects/test_postgres.py +++ b/tests/dialects/test_postgres.py @@ -8,9 +8,7 @@ class TestPostgres(Validator): def test_ddl(self): self.validate_all( "CREATE TABLE products (product_no INT UNIQUE, name TEXT, price DECIMAL)", - write={ - "postgres": "CREATE TABLE products (product_no INT UNIQUE, name TEXT, price DECIMAL)" - }, + write={"postgres": "CREATE TABLE products (product_no INT UNIQUE, name TEXT, price DECIMAL)"}, ) self.validate_all( "CREATE TABLE products (product_no INT CONSTRAINT must_be_different UNIQUE, name TEXT CONSTRAINT present NOT NULL, price DECIMAL)", @@ -42,11 +40,17 @@ class TestPostgres(Validator): " CONSTRAINT valid_discount CHECK (price > discounted_price))" }, ) + self.validate_all( + "CREATE TABLE products (x INT GENERATED BY DEFAULT AS IDENTITY)", + write={"postgres": "CREATE TABLE products (x INT GENERATED BY DEFAULT AS IDENTITY)"}, + ) + self.validate_all( + "CREATE TABLE products (x INT GENERATED ALWAYS AS IDENTITY)", + write={"postgres": "CREATE TABLE products (x INT GENERATED ALWAYS AS IDENTITY)"}, + ) with self.assertRaises(ParseError): - transpile( - "CREATE TABLE products (price DECIMAL CHECK price > 0)", read="postgres" - ) + transpile("CREATE TABLE products (price DECIMAL CHECK price > 0)", read="postgres") with self.assertRaises(ParseError): transpile( "CREATE TABLE products (price DECIMAL, CHECK price > 1)", @@ -54,11 +58,16 @@ class TestPostgres(Validator): ) def test_postgres(self): - self.validate_all( - "CREATE TABLE x (a INT SERIAL)", - read={"sqlite": "CREATE TABLE x (a INTEGER AUTOINCREMENT)"}, - 
write={"sqlite": "CREATE TABLE x (a INTEGER AUTOINCREMENT)"}, - ) + self.validate_identity("SELECT CASE WHEN SUBSTRING('abcdefg') IN ('ab') THEN 1 ELSE 0 END") + self.validate_identity("SELECT CASE WHEN SUBSTRING('abcdefg' FROM 1) IN ('ab') THEN 1 ELSE 0 END") + self.validate_identity("SELECT CASE WHEN SUBSTRING('abcdefg' FROM 1 FOR 2) IN ('ab') THEN 1 ELSE 0 END") + self.validate_identity('SELECT * FROM "x" WHERE SUBSTRING("x"."foo" FROM 1 FOR 2) IN (\'mas\')') + self.validate_identity("SELECT * FROM x WHERE SUBSTRING('Thomas' FROM '...$') IN ('mas')") + self.validate_identity("SELECT * FROM x WHERE SUBSTRING('Thomas' FROM '%#\"o_a#\"_' FOR '#') IN ('mas')") + self.validate_identity("SELECT SUBSTRING('bla' + 'foo' || 'bar' FROM 3 - 1 + 5 FOR 4 + SOME_FUNC(arg1, arg2))") + self.validate_identity("SELECT TRIM(' X' FROM ' XXX ')") + self.validate_identity("SELECT TRIM(LEADING 'bla' FROM ' XXX ' COLLATE utf8_bin)") + self.validate_all( "CREATE TABLE x (a UUID, b BYTEA)", write={ @@ -91,3 +100,65 @@ class TestPostgres(Validator): "spark": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname NULLS LAST", }, ) + self.validate_all( + "SELECT CASE WHEN SUBSTRING('abcdefg' FROM 1 FOR 2) IN ('ab') THEN 1 ELSE 0 END", + write={ + "hive": "SELECT CASE WHEN SUBSTRING('abcdefg', 1, 2) IN ('ab') THEN 1 ELSE 0 END", + "spark": "SELECT CASE WHEN SUBSTRING('abcdefg', 1, 2) IN ('ab') THEN 1 ELSE 0 END", + }, + ) + self.validate_all( + "SELECT * FROM x WHERE SUBSTRING(col1 FROM 3 + LENGTH(col1) - 10 FOR 10) IN (col2)", + write={ + "hive": "SELECT * FROM x WHERE SUBSTRING(col1, 3 + LENGTH(col1) - 10, 10) IN (col2)", + "spark": "SELECT * FROM x WHERE SUBSTRING(col1, 3 + LENGTH(col1) - 10, 10) IN (col2)", + }, + ) + self.validate_all( + "SELECT SUBSTRING(CAST(2022 AS CHAR(4)) || LPAD(CAST(3 AS CHAR(2)), 2, '0') FROM 3 FOR 4)", + read={ + "postgres": "SELECT SUBSTRING(2022::CHAR(4) || LPAD(3::CHAR(2), 2, '0') FROM 3 FOR 4)", + }, + ) + self.validate_all( + "SELECT TRIM(BOTH ' XXX ')", + write={ + "mysql": "SELECT TRIM(' XXX ')", + "postgres": "SELECT TRIM(' XXX ')", + "hive": "SELECT TRIM(' XXX ')", + }, + ) + self.validate_all( + "TRIM(LEADING FROM ' XXX ')", + write={ + "mysql": "LTRIM(' XXX ')", + "postgres": "LTRIM(' XXX ')", + "hive": "LTRIM(' XXX ')", + "presto": "LTRIM(' XXX ')", + }, + ) + self.validate_all( + "TRIM(TRAILING FROM ' XXX ')", + write={ + "mysql": "RTRIM(' XXX ')", + "postgres": "RTRIM(' XXX ')", + "hive": "RTRIM(' XXX ')", + "presto": "RTRIM(' XXX ')", + }, + ) + self.validate_all( + "SELECT * FROM foo, LATERAL (SELECT * FROM bar WHERE bar.id = foo.bar_id) AS ss", + read={"postgres": "SELECT * FROM foo, LATERAL (SELECT * FROM bar WHERE bar.id = foo.bar_id) AS ss"}, + ) + self.validate_all( + "SELECT m.name FROM manufacturers AS m LEFT JOIN LATERAL GET_PRODUCT_NAMES(m.id) AS pname ON TRUE WHERE pname IS NULL", + read={ + "postgres": "SELECT m.name FROM manufacturers AS m LEFT JOIN LATERAL GET_PRODUCT_NAMES(m.id) AS pname ON TRUE WHERE pname IS NULL", + }, + ) + self.validate_all( + "SELECT p1.id, p2.id, v1, v2 FROM polygons AS p1, polygons AS p2, LATERAL VERTICES(p1.poly) v1, LATERAL VERTICES(p2.poly) v2 WHERE (v1 <-> v2) < 10 AND p1.id <> p2.id", + read={ + "postgres": "SELECT p1.id, p2.id, v1, v2 FROM polygons p1, polygons p2, LATERAL VERTICES(p1.poly) v1, LATERAL VERTICES(p2.poly) v2 WHERE (v1 <-> v2) < 10 AND p1.id != p2.id", + }, + ) diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py new file mode 100644 index 
index 0000000..1ed2bb6
--- /dev/null
+++ b/tests/dialects/test_redshift.py
@@ -0,0 +1,64 @@
+from tests.dialects.test_dialect import Validator
+
+
+class TestRedshift(Validator):
+    dialect = "redshift"
+
+    def test_redshift(self):
+        self.validate_all(
+            'create table "group" ("col" char(10))',
+            write={
+                "redshift": 'CREATE TABLE "group" ("col" CHAR(10))',
+                "mysql": "CREATE TABLE `group` (`col` CHAR(10))",
+            },
+        )
+        self.validate_all(
+            'create table if not exists city_slash_id("city/id" integer not null, state char(2) not null)',
+            write={
+                "redshift": 'CREATE TABLE IF NOT EXISTS city_slash_id ("city/id" INTEGER NOT NULL, state CHAR(2) NOT NULL)',
+                "presto": 'CREATE TABLE IF NOT EXISTS city_slash_id ("city/id" INTEGER NOT NULL, state CHAR(2) NOT NULL)',
+            },
+        )
+        self.validate_all(
+            "SELECT ST_AsEWKT(ST_GeomFromEWKT('SRID=4326;POINT(10 20)')::geography)",
+            write={
+                "redshift": "SELECT ST_ASEWKT(CAST(ST_GEOMFROMEWKT('SRID=4326;POINT(10 20)') AS GEOGRAPHY))",
+                "bigquery": "SELECT ST_ASEWKT(CAST(ST_GEOMFROMEWKT('SRID=4326;POINT(10 20)') AS GEOGRAPHY))",
+            },
+        )
+        self.validate_all(
+            "SELECT ST_AsEWKT(ST_GeogFromText('LINESTRING(110 40, 2 3, -10 80, -7 9)')::geometry)",
+            write={
+                "redshift": "SELECT ST_ASEWKT(CAST(ST_GEOGFROMTEXT('LINESTRING(110 40, 2 3, -10 80, -7 9)') AS GEOMETRY))",
+            },
+        )
+        self.validate_all(
+            "SELECT 'abc'::BINARY",
+            write={
+                "redshift": "SELECT CAST('abc' AS VARBYTE)",
+            },
+        )
+        self.validate_all(
+            "SELECT * FROM venue WHERE (venuecity, venuestate) IN (('Miami', 'FL'), ('Tampa', 'FL')) ORDER BY venueid",
+            write={
+                "redshift": "SELECT * FROM venue WHERE (venuecity, venuestate) IN (('Miami', 'FL'), ('Tampa', 'FL')) ORDER BY venueid",
+            },
+        )
+        self.validate_all(
+            'SELECT tablename, "column" FROM pg_table_def WHERE "column" LIKE \'%start\\_%\' LIMIT 5',
+            write={
+                "redshift": 'SELECT tablename, "column" FROM pg_table_def WHERE "column" LIKE \'%start\\\\_%\' LIMIT 5'
+            },
+        )
+
+    def test_identity(self):
+        self.validate_identity("CAST('bla' AS SUPER)")
+        self.validate_identity("CREATE TABLE real1 (realcol REAL)")
+        self.validate_identity("CAST('foo' AS HLLSKETCH)")
+        self.validate_identity("SELECT DATEADD(day, 1, 'today')")
+        self.validate_identity("'abc' SIMILAR TO '(b|c)%'")
+        self.validate_identity(
+            "SELECT caldate + INTERVAL '1 second' AS dateplus FROM date WHERE caldate = '12-31-2008'"
+        )
+        self.validate_identity("CREATE TABLE datetable (start_date DATE, end_date DATE)")
+        self.validate_identity("SELECT COUNT(*) FROM event WHERE eventname LIKE '%Ring%' OR eventname LIKE '%Die%'")
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 62f78e1..2eeff52 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -143,3 +143,35 @@ class TestSnowflake(Validator):
                 "snowflake": r"SELECT 'a \' \\ \\t \\x21 z $ '",
             },
         )
+
+    def test_null_treatment(self):
+        self.validate_all(
+            r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
+            write={
+                "snowflake": r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1"
+            },
+        )
+        self.validate_all(
+            r"SELECT FIRST_VALUE(TABLE1.COLUMN1 RESPECT NULLS) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
+            write={
+                "snowflake": r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1"
+            },
+        )
+        self.validate_all(
+            r"SELECT FIRST_VALUE(TABLE1.COLUMN1) RESPECT NULLS OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
+            write={
+                "snowflake": r"SELECT FIRST_VALUE(TABLE1.COLUMN1) RESPECT NULLS OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1"
+            },
+        )
+        self.validate_all(
+            r"SELECT FIRST_VALUE(TABLE1.COLUMN1 IGNORE NULLS) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
+            write={
+                "snowflake": r"SELECT FIRST_VALUE(TABLE1.COLUMN1 IGNORE NULLS) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1"
+            },
+        )
+        self.validate_all(
+            r"SELECT FIRST_VALUE(TABLE1.COLUMN1) IGNORE NULLS OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
+            write={
+                "snowflake": r"SELECT FIRST_VALUE(TABLE1.COLUMN1) IGNORE NULLS OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1"
+            },
+        )
diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py
index a0576de..3cc974c 100644
--- a/tests/dialects/test_sqlite.py
+++ b/tests/dialects/test_sqlite.py
@@ -34,6 +34,7 @@ class TestSQLite(Validator):
             write={
                 "sqlite": "CREATE TABLE z (a INTEGER UNIQUE PRIMARY KEY AUTOINCREMENT)",
                 "mysql": "CREATE TABLE z (a INT UNIQUE PRIMARY KEY AUTO_INCREMENT)",
+                "postgres": "CREATE TABLE z (a INT GENERATED BY DEFAULT AS IDENTITY NOT NULL UNIQUE PRIMARY KEY)",
             },
         )
         self.validate_all(
@@ -70,3 +71,20 @@ class TestSQLite(Validator):
                 "sqlite": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
             },
         )
+
+    def test_hexadecimal_literal(self):
+        self.validate_all(
+            "SELECT 0XCC",
+            write={
+                "sqlite": "SELECT x'CC'",
+                "mysql": "SELECT x'CC'",
+            },
+        )
+
+    def test_window_null_treatment(self):
+        self.validate_all(
+            "SELECT FIRST_VALUE(Name) OVER (PARTITION BY AlbumId ORDER BY Bytes DESC) AS LargestTrack FROM tracks",
+            write={
+                "sqlite": "SELECT FIRST_VALUE(Name) OVER (PARTITION BY AlbumId ORDER BY Bytes DESC) AS LargestTrack FROM tracks"
+            },
+        )
--
cgit v1.2.3