author     Daniel Baumann <daniel.baumann@progress-linux.org>    2023-08-10 09:23:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2023-08-10 09:23:50 +0000
commit     4cc7d5a6dcda8f275b4156a9a23bbe5380be1b53 (patch)
tree       1084b1a2dd9f2782031b4aa79608db08968a5837 /tests
parent     Releasing debian version 17.9.1-1. (diff)
download   sqlglot-4cc7d5a6dcda8f275b4156a9a23bbe5380be1b53.tar.xz
           sqlglot-4cc7d5a6dcda8f275b4156a9a23bbe5380be1b53.zip

Merging upstream version 17.11.0.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests')
-rw-r--r--  tests/dialects/test_bigquery.py | 21
-rw-r--r--  tests/dialects/test_duckdb.py | 32
-rw-r--r--  tests/dialects/test_hive.py | 1
-rw-r--r--  tests/dialects/test_presto.py | 31
-rw-r--r--  tests/dialects/test_redshift.py | 43
-rw-r--r--  tests/dialects/test_snowflake.py | 17
-rw-r--r--  tests/dialects/test_starrocks.py | 16
-rw-r--r--  tests/dialects/test_tsql.py | 8
-rw-r--r--  tests/fixtures/identity.sql | 2
-rw-r--r--  tests/fixtures/optimizer/optimizer.sql | 37
-rw-r--r--  tests/fixtures/optimizer/pushdown_projections.sql | 9
-rw-r--r--  tests/fixtures/optimizer/qualify_columns.sql | 23
-rw-r--r--  tests/fixtures/optimizer/qualify_columns__invalid.sql | 1
-rw-r--r--  tests/test_expressions.py | 4
-rw-r--r--  tests/test_optimizer.py | 2
-rw-r--r--  tests/test_parser.py | 14
16 files changed, 215 insertions, 46 deletions
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 8d01ebe..b5f91cf 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -200,8 +200,15 @@ class TestBigQuery(Validator):
)
self.validate_all(
"REGEXP_CONTAINS('foo', '.*')",
- read={"bigquery": "REGEXP_CONTAINS('foo', '.*')"},
- write={"mysql": "REGEXP_LIKE('foo', '.*')"},
+ read={
+ "bigquery": "REGEXP_CONTAINS('foo', '.*')",
+ "mysql": "REGEXP_LIKE('foo', '.*')",
+ "starrocks": "REGEXP('foo', '.*')",
+ },
+ write={
+ "mysql": "REGEXP_LIKE('foo', '.*')",
+ "starrocks": "REGEXP('foo', '.*')",
+ },
),
self.validate_all(
'"""x"""',
@@ -481,13 +488,21 @@ class TestBigQuery(Validator):
write={
"bigquery": "DATE_DIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE), DAY)",
"mysql": "DATEDIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
+ "starrocks": "DATE_DIFF('DAY', CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
+ },
+ )
+ self.validate_all(
+ "DATE_DIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE), DAY)",
+ read={
+ "mysql": "DATEDIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
+ "starrocks": "DATEDIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
},
)
self.validate_all(
"DATE_DIFF(DATE '2010-07-07', DATE '2008-12-25', MINUTE)",
write={
"bigquery": "DATE_DIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE), MINUTE)",
- "mysql": "DATEDIFF(CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
+ "starrocks": "DATE_DIFF('MINUTE', CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))",
},
)
self.validate_all(
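
The new BigQuery cases above boil down to the following behavior. This is a minimal sketch
using sqlglot's public transpile() API (assumed available as in 17.x); the expected outputs
are the strings asserted in the tests:

import sqlglot

# BigQuery's REGEXP_CONTAINS now maps to the MySQL and StarRocks regex functions.
print(sqlglot.transpile("REGEXP_CONTAINS('foo', '.*')", read="bigquery", write="mysql")[0])
# REGEXP_LIKE('foo', '.*')
print(sqlglot.transpile("REGEXP_CONTAINS('foo', '.*')", read="bigquery", write="starrocks")[0])
# REGEXP('foo', '.*')

# DATE_DIFF with an explicit unit is rendered as StarRocks' unit-first DATE_DIFF.
print(
    sqlglot.transpile(
        "DATE_DIFF(DATE '2010-07-07', DATE '2008-12-25', MINUTE)",
        read="bigquery",
        write="starrocks",
    )[0]
)
# DATE_DIFF('MINUTE', CAST('2010-07-07' AS DATE), CAST('2008-12-25' AS DATE))
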
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 5284700..5c35d8f 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -583,6 +583,38 @@ class TestDuckDB(Validator):
write={"duckdb": "SELECT a, BOOL_OR(b) FROM table GROUP BY a"},
)
+ def test_encode_decode(self):
+ self.validate_all(
+ "ENCODE(x)",
+ read={
+ "spark": "ENCODE(x, 'utf-8')",
+ "presto": "TO_UTF8(x)",
+ },
+ write={
+ "duckdb": "ENCODE(x)",
+ "spark": "ENCODE(x, 'utf-8')",
+ "presto": "TO_UTF8(x)",
+ },
+ )
+ self.validate_all(
+ "DECODE(x)",
+ read={
+ "spark": "DECODE(x, 'utf-8')",
+ "presto": "FROM_UTF8(x)",
+ },
+ write={
+ "duckdb": "DECODE(x)",
+ "spark": "DECODE(x, 'utf-8')",
+ "presto": "FROM_UTF8(x)",
+ },
+ )
+ self.validate_all(
+ "DECODE(x)",
+ read={
+ "presto": "FROM_UTF8(x, y)",
+ },
+ )
+
def test_rename_table(self):
self.validate_all(
"ALTER TABLE db.t1 RENAME TO db.t2",
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py
index 6dd484f..4c463f7 100644
--- a/tests/dialects/test_hive.py
+++ b/tests/dialects/test_hive.py
@@ -390,6 +390,7 @@ class TestHive(Validator):
)
def test_hive(self):
+ self.validate_identity("SELECT transform")
self.validate_identity("SELECT * FROM test DISTRIBUTE BY y SORT BY x DESC ORDER BY l")
self.validate_identity(
"SELECT * FROM test WHERE RAND() <= 0.1 DISTRIBUTE BY RAND() SORT BY RAND()"
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index a2800bd..ec1ad30 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -466,6 +466,7 @@ class TestPresto(Validator):
read={"spark": "STARTSWITH('abc', 'a')"},
write={
"presto": "STARTS_WITH('abc', 'a')",
+ "snowflake": "STARTSWITH('abc', 'a')",
"spark": "STARTSWITH('abc', 'a')",
},
)
@@ -740,46 +741,44 @@ class TestPresto(Validator):
)
def test_encode_decode(self):
+ self.validate_identity("FROM_UTF8(x, y)")
+
self.validate_all(
"TO_UTF8(x)",
+ read={
+ "duckdb": "ENCODE(x)",
+ "spark": "ENCODE(x, 'utf-8')",
+ },
write={
+ "duckdb": "ENCODE(x)",
+ "presto": "TO_UTF8(x)",
"spark": "ENCODE(x, 'utf-8')",
},
)
self.validate_all(
"FROM_UTF8(x)",
- write={
+ read={
+ "duckdb": "DECODE(x)",
"spark": "DECODE(x, 'utf-8')",
},
- )
- self.validate_all(
- "FROM_UTF8(x, y)",
- write={
- "presto": "FROM_UTF8(x, y)",
- },
- )
- self.validate_all(
- "ENCODE(x, 'utf-8')",
- write={
- "presto": "TO_UTF8(x)",
- },
- )
- self.validate_all(
- "DECODE(x, 'utf-8')",
write={
+ "duckdb": "DECODE(x)",
"presto": "FROM_UTF8(x)",
+ "spark": "DECODE(x, 'utf-8')",
},
)
self.validate_all(
"ENCODE(x, 'invalid')",
write={
"presto": UnsupportedError,
+ "duckdb": UnsupportedError,
},
)
self.validate_all(
"DECODE(x, 'invalid')",
write={
"presto": UnsupportedError,
+ "duckdb": UnsupportedError,
},
)
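
Besides mirroring the DuckDB/Spark mappings, the Presto tests pin down two details:
FROM_UTF8(x, y) (with a replacement character) round-trips in Presto while the replacement
argument is dropped for DuckDB, and a non-UTF-8 charset is now flagged as unsupported by the
DuckDB generator as well. A rough sketch, assuming transpile() forwards the generator's
unsupported_level option (an assumption here, not asserted in the diff):

import sqlglot
from sqlglot.errors import ErrorLevel, UnsupportedError

try:
    # 'invalid' is not a charset ENCODE/DECODE can represent in DuckDB or Presto.
    sqlglot.transpile(
        "ENCODE(x, 'invalid')",
        read="presto",
        write="duckdb",
        unsupported_level=ErrorLevel.RAISE,  # assumption: forwarded to the generator
    )
except UnsupportedError as exc:
    print(f"unsupported: {exc}")
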
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index 90c953f..96e9e20 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -5,15 +5,16 @@ class TestRedshift(Validator):
dialect = "redshift"
def test_redshift(self):
- self.validate_identity("SELECT * FROM #x")
- self.validate_identity("SELECT INTERVAL '5 day'")
- self.validate_identity("foo$")
- self.validate_identity("$foo")
-
+ self.validate_all(
+ "SELECT INTERVAL '5 days'",
+ read={
+ "": "SELECT INTERVAL '5' days",
+ },
+ )
self.validate_all(
"SELECT ADD_MONTHS('2008-03-31', 1)",
write={
- "redshift": "SELECT DATEADD(month, 1, '2008-03-31')",
+ "redshift": "SELECT DATEADD(month, 1, CAST('2008-03-31' AS DATE))",
"trino": "SELECT DATE_ADD('month', 1, CAST(CAST('2008-03-31' AS TIMESTAMP) AS DATE))",
},
)
@@ -88,11 +89,6 @@ class TestRedshift(Validator):
"snowflake": "SELECT DATE_PART(month, CAST('20220502' AS DATE))",
},
)
- self.validate_all("SELECT INTERVAL '5 days'", read={"": "SELECT INTERVAL '5' days"})
- self.validate_all("CONVERT(INT, x)", write={"redshift": "CAST(x AS INTEGER)"})
- self.validate_all(
- "DATEADD('day', ndays, caldate)", write={"redshift": "DATEADD(day, ndays, caldate)"}
- )
self.validate_all(
'create table "group" ("col" char(10))',
write={
@@ -186,7 +182,7 @@ class TestRedshift(Validator):
self.validate_all(
"DATEDIFF('day', a, b)",
write={
- "redshift": "DATEDIFF(day, a, b)",
+ "redshift": "DATEDIFF(day, CAST(a AS DATE), CAST(b AS DATE))",
"presto": "DATE_DIFF('day', CAST(CAST(a AS TIMESTAMP) AS DATE), CAST(CAST(b AS TIMESTAMP) AS DATE))",
},
)
@@ -198,15 +194,18 @@ class TestRedshift(Validator):
)
def test_identity(self):
+ self.validate_identity("SELECT * FROM #x")
+ self.validate_identity("SELECT INTERVAL '5 day'")
+ self.validate_identity("foo$")
+ self.validate_identity("$foo")
self.validate_identity("CAST('bla' AS SUPER)")
self.validate_identity("CREATE TABLE real1 (realcol REAL)")
self.validate_identity("CAST('foo' AS HLLSKETCH)")
- self.validate_identity("SELECT DATEADD(day, 1, 'today')")
self.validate_identity("'abc' SIMILAR TO '(b|c)%'")
+ self.validate_identity("CREATE TABLE datetable (start_date DATE, end_date DATE)")
self.validate_identity(
"SELECT caldate + INTERVAL '1 second' AS dateplus FROM date WHERE caldate = '12-31-2008'"
)
- self.validate_identity("CREATE TABLE datetable (start_date DATE, end_date DATE)")
self.validate_identity(
"SELECT COUNT(*) FROM event WHERE eventname LIKE '%Ring%' OR eventname LIKE '%Die%'"
)
@@ -225,6 +224,22 @@ class TestRedshift(Validator):
self.validate_identity(
"CREATE TABLE SOUP (SOUP1 VARCHAR(50) NOT NULL ENCODE ZSTD, SOUP2 VARCHAR(70) NULL ENCODE DELTA)"
)
+ self.validate_identity(
+ "SELECT DATEADD(day, 1, 'today')",
+ "SELECT DATEADD(day, 1, CAST('today' AS DATE))",
+ )
+ self.validate_identity(
+ "SELECT DATEADD('day', ndays, caldate)",
+ "SELECT DATEADD(day, ndays, CAST(caldate AS DATE))",
+ )
+ self.validate_identity(
+ "CONVERT(INT, x)",
+ "CAST(x AS INTEGER)",
+ )
+ self.validate_identity(
+ "SELECT DATE_ADD('day', 1, DATE('2023-01-01'))",
+ "SELECT DATEADD(day, 1, CAST(DATE('2023-01-01') AS DATE))",
+ )
def test_values(self):
self.validate_all(
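
The Redshift changes move a few one-off assertions into test_identity and, more importantly,
make DATEADD/DATEDIFF cast their date arguments explicitly. A minimal sketch of the new
output, using the strings asserted above:

import sqlglot

print(sqlglot.transpile("SELECT DATEADD('day', ndays, caldate)", read="redshift", write="redshift")[0])
# SELECT DATEADD(day, ndays, CAST(caldate AS DATE))

print(sqlglot.transpile("DATEDIFF('day', a, b)", read="redshift", write="redshift")[0])
# DATEDIFF(day, CAST(a AS DATE), CAST(b AS DATE))
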
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index a889e1d..3053d47 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -8,6 +8,7 @@ class TestSnowflake(Validator):
dialect = "snowflake"
def test_snowflake(self):
+ self.validate_identity("WEEKOFYEAR(tstamp)")
self.validate_identity("SELECT SUM(amount) FROM mytable GROUP BY ALL")
self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT foo FROM IDENTIFIER('x')")
self.validate_identity("WITH x AS (SELECT 1 AS foo) SELECT IDENTIFIER('foo') FROM x")
@@ -33,12 +34,21 @@ class TestSnowflake(Validator):
self.validate_identity("ALTER TABLE foo UNSET DATA_RETENTION_TIME_IN_DAYS, CHANGE_TRACKING")
self.validate_identity("COMMENT IF EXISTS ON TABLE foo IS 'bar'")
self.validate_identity("SELECT CONVERT_TIMEZONE('UTC', 'America/Los_Angeles', col)")
+ self.validate_identity("REGEXP_REPLACE('target', 'pattern', '\n')")
self.validate_identity(
'COPY INTO NEW_TABLE ("foo", "bar") FROM (SELECT $1, $2, $3, $4 FROM @%old_table)'
)
self.validate_identity(
"SELECT state, city, SUM(retail_price * quantity) AS gross_revenue FROM sales GROUP BY ALL"
)
+ self.validate_identity(
+ r"SELECT RLIKE(a, $$regular expression with \ characters: \d{2}-\d{3}-\d{4}$$, 'i') FROM log_source",
+ r"SELECT REGEXP_LIKE(a, 'regular expression with \\ characters: \\d{2}-\\d{3}-\\d{4}', 'i') FROM log_source",
+ )
+ self.validate_identity(
+ r"SELECT $$a ' \ \t \x21 z $ $$",
+ r"SELECT 'a \' \\ \\t \\x21 z $ '",
+ )
self.validate_all("CAST(x AS BYTEINT)", write={"snowflake": "CAST(x AS INT)"})
self.validate_all("CAST(x AS CHAR VARYING)", write={"snowflake": "CAST(x AS VARCHAR)"})
@@ -385,13 +395,6 @@ class TestSnowflake(Validator):
},
)
self.validate_all(
- r"SELECT $$a ' \ \t \x21 z $ $$",
- write={
- "snowflake": r"SELECT 'a \' \ \t \x21 z $ '",
- },
- )
- self.validate_identity("REGEXP_REPLACE('target', 'pattern', '\n')")
- self.validate_all(
"SELECT RLIKE(a, b)",
write={
"hive": "SELECT a RLIKE b",
diff --git a/tests/dialects/test_starrocks.py b/tests/dialects/test_starrocks.py
index 96e20da..21a89d7 100644
--- a/tests/dialects/test_starrocks.py
+++ b/tests/dialects/test_starrocks.py
@@ -1,7 +1,7 @@
from tests.dialects.test_dialect import Validator
-class TestMySQL(Validator):
+class TestStarrocks(Validator):
dialect = "starrocks"
def test_identity(self):
@@ -10,11 +10,21 @@ class TestMySQL(Validator):
def test_time(self):
self.validate_identity("TIMESTAMP('2022-01-01')")
+ self.validate_identity(
+ "SELECT DATE_DIFF('second', '2010-11-30 23:59:59', '2010-11-30 20:58:59')"
+ )
+ self.validate_identity(
+ "SELECT DATE_DIFF('minute', '2010-11-30 23:59:59', '2010-11-30 20:58:59')"
+ )
def test_regex(self):
self.validate_all(
- "SELECT REGEXP_LIKE(abc, '%foo%')",
- write={
+ "SELECT REGEXP(abc, '%foo%')",
+ read={
+ "mysql": "SELECT REGEXP_LIKE(abc, '%foo%')",
"starrocks": "SELECT REGEXP(abc, '%foo%')",
},
+ write={
+ "mysql": "SELECT REGEXP_LIKE(abc, '%foo%')",
+ },
)
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 5266bd4..f43b41b 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -6,6 +6,10 @@ class TestTSQL(Validator):
dialect = "tsql"
def test_tsql(self):
+ projection = parse_one("SELECT a = 1", read="tsql").selects[0]
+ projection.assert_is(exp.Alias)
+ projection.args["alias"].assert_is(exp.Identifier)
+
self.validate_identity("UPDATE x SET y = 1 OUTPUT x.a, x.b INTO @y FROM y")
self.validate_identity("UPDATE x SET y = 1 OUTPUT x.a, x.b FROM y")
self.validate_identity("INSERT INTO x (y) OUTPUT x.a, x.b INTO l SELECT * FROM z")
@@ -25,6 +29,10 @@ class TestTSQL(Validator):
self.validate_identity('SELECT "x"."y" FROM foo')
self.validate_identity("SELECT * FROM #foo")
self.validate_identity("SELECT * FROM ##foo")
+ self.validate_identity("SELECT a = 1", "SELECT 1 AS a")
+ self.validate_identity(
+ "SELECT a = 1 UNION ALL SELECT a = b", "SELECT 1 AS a UNION ALL SELECT b AS a"
+ )
self.validate_identity(
"SELECT x FROM @MyTableVar AS m JOIN Employee ON m.EmployeeID = Employee.EmployeeID"
)
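
The T-SQL tests pin down the "SELECT alias = expression" projection form, which now parses
into an exp.Alias. A minimal sketch:

import sqlglot
from sqlglot import exp

projection = sqlglot.parse_one("SELECT a = 1", read="tsql").selects[0]
assert isinstance(projection, exp.Alias)  # "a" is the alias, 1 is the value

print(sqlglot.parse_one("SELECT a = 1", read="tsql").sql(dialect="tsql"))
# SELECT 1 AS a
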
diff --git a/tests/fixtures/identity.sql b/tests/fixtures/identity.sql
index 10f77ac..0690421 100644
--- a/tests/fixtures/identity.sql
+++ b/tests/fixtures/identity.sql
@@ -87,6 +87,7 @@ x IN ('a', 'a''a')
x IN ((1))
x BETWEEN -1 AND 1
x BETWEEN 'a' || b AND 'c' || d
+((a, b) AS c)
NOT x IS NULL
x IS TRUE
x IS FALSE
@@ -852,3 +853,4 @@ SELECT * FROM (tbl1 CROSS JOIN (SELECT * FROM tbl2) AS t1)
/* comment1 */ INSERT INTO x /* comment2 */ VALUES (1, 2, 3)
/* comment1 */ UPDATE tbl /* comment2 */ SET x = 2 WHERE x < 2
/* comment1 */ DELETE FROM x /* comment2 */ WHERE y > 1
+SELECT next, transform, if
diff --git a/tests/fixtures/optimizer/optimizer.sql b/tests/fixtures/optimizer/optimizer.sql
index 981e052..74572d2 100644
--- a/tests/fixtures/optimizer/optimizer.sql
+++ b/tests/fixtures/optimizer/optimizer.sql
@@ -907,3 +907,40 @@ JOIN "x" AS "x"
ON "y"."b" = "x"."b"
GROUP BY
"x"."a";
+
+# title: select * from a cte, which had one of its two columns aliased
+WITH cte(x, y) AS (SELECT 1, 2) SELECT * FROM cte AS cte(a);
+WITH "cte" AS (
+ SELECT
+ 1 AS "x",
+ 2 AS "y"
+)
+SELECT
+ "cte"."a" AS "a",
+ "cte"."y" AS "y"
+FROM "cte" AS "cte"("a");
+
+# title: select single column from a cte using its alias
+WITH cte(x) AS (SELECT 1) SELECT a FROM cte AS cte(a);
+WITH "cte" AS (
+ SELECT
+ 1 AS "x"
+)
+SELECT
+ "cte"."a" AS "a"
+FROM "cte" AS "cte"("a");
+
+# title: joined ctes with a "using" clause, one of which has had its column aliased
+WITH m(a) AS (SELECT 1), n(b) AS (SELECT 1) SELECT * FROM m JOIN n AS foo(a) USING (a);
+WITH "m" AS (
+ SELECT
+ 1 AS "a"
+), "n" AS (
+ SELECT
+ 1 AS "b"
+)
+SELECT
+ COALESCE("m"."a", "foo"."a") AS "a"
+FROM "m"
+JOIN "n" AS "foo"("a")
+ ON "m"."a" = "foo"."a";
diff --git a/tests/fixtures/optimizer/pushdown_projections.sql b/tests/fixtures/optimizer/pushdown_projections.sql
index 86dea8c..70fd9b0 100644
--- a/tests/fixtures/optimizer/pushdown_projections.sql
+++ b/tests/fixtures/optimizer/pushdown_projections.sql
@@ -61,6 +61,15 @@ SELECT i.a AS a FROM x AS i LEFT JOIN (SELECT _q_0.a AS a FROM (SELECT x.a AS a
WITH cte AS (SELECT source.a AS a, ROW_NUMBER() OVER (PARTITION BY source.id, source.timestamp ORDER BY source.a DESC) AS index FROM source AS source QUALIFY index) SELECT cte.a AS a FROM cte;
WITH cte AS (SELECT source.a AS a FROM source AS source QUALIFY ROW_NUMBER() OVER (PARTITION BY source.id, source.timestamp ORDER BY source.a DESC)) SELECT cte.a AS a FROM cte;
+WITH cte AS (SELECT 1 AS x, 2 AS y, 3 AS z) SELECT cte.a FROM cte AS cte(a);
+WITH cte AS (SELECT 1 AS x) SELECT cte.a AS a FROM cte AS cte(a);
+
+WITH cte(x, y, z) AS (SELECT 1, 2, 3) SELECT a, z FROM cte AS cte(a);
+WITH cte AS (SELECT 1 AS x, 3 AS z) SELECT cte.a AS a, cte.z AS z FROM cte AS cte(a);
+
+WITH cte(x, y, z) AS (SELECT 1, 2, 3) SELECT a, z FROM (SELECT * FROM cte AS cte(b)) AS cte(a);
+WITH cte AS (SELECT 1 AS x, 3 AS z) SELECT cte.a AS a, cte.z AS z FROM (SELECT cte.b AS a, cte.z AS z FROM cte AS cte(b)) AS cte;
+
--------------------------------------
-- Unknown Star Expansion
--------------------------------------
diff --git a/tests/fixtures/optimizer/qualify_columns.sql b/tests/fixtures/optimizer/qualify_columns.sql
index 8a2519e..7ba8e54 100644
--- a/tests/fixtures/optimizer/qualify_columns.sql
+++ b/tests/fixtures/optimizer/qualify_columns.sql
@@ -125,7 +125,7 @@ SELECT COALESCE(x.a) AS d FROM x JOIN y ON x.b = y.b GROUP BY d;
SELECT COALESCE(x.a) AS d FROM x AS x JOIN y AS y ON x.b = y.b GROUP BY COALESCE(x.a);
SELECT a + 1 AS d FROM x WHERE d > 1;
-SELECT x.a + 1 AS d FROM x AS x WHERE x.a + 1 > 1;
+SELECT x.a + 1 AS d FROM x AS x WHERE (x.a + 1) > 1;
# execute: false
SELECT a + 1 AS d, d + 2 FROM x;
@@ -300,6 +300,12 @@ WITH z AS (SELECT x.a AS a, x.b AS b FROM x AS x), q AS (SELECT z.b AS b FROM z)
WITH z AS ((SELECT b FROM x UNION ALL SELECT b FROM y) ORDER BY b) SELECT * FROM z;
WITH z AS ((SELECT x.b AS b FROM x AS x UNION ALL SELECT y.b AS b FROM y AS y) ORDER BY b) SELECT z.b AS b FROM z;
+WITH cte(x) AS (SELECT 1) SELECT * FROM cte AS cte(a);
+WITH cte AS (SELECT 1 AS x) SELECT cte.a AS a FROM cte AS cte(a);
+
+WITH cte(x, y) AS (SELECT 1, 2) SELECT cte.* FROM cte AS cte(a);
+WITH cte AS (SELECT 1 AS x, 2 AS y) SELECT cte.a AS a, cte.y AS y FROM cte AS cte(a);
+
--------------------------------------
-- Except and Replace
--------------------------------------
@@ -383,6 +389,9 @@ SELECT x.b AS b FROM t AS t JOIN x AS x ON t.a = x.a;
SELECT a FROM t1 JOIN t2 USING(a);
SELECT COALESCE(t1.a, t2.a) AS a FROM t1 AS t1 JOIN t2 AS t2 ON t1.a = t2.a;
+WITH m(a) AS (SELECT 1), n(b) AS (SELECT 1) SELECT * FROM m JOIN n AS foo(a) USING (a);
+WITH m AS (SELECT 1 AS a), n AS (SELECT 1 AS b) SELECT COALESCE(m.a, foo.a) AS a FROM m JOIN n AS foo(a) ON m.a = foo.a;
+
--------------------------------------
-- Hint with table reference
--------------------------------------
@@ -444,7 +453,7 @@ SELECT x.a AS a, x.b AS b FROM x AS x QUALIFY COUNT(x.a) OVER (PARTITION BY x.b)
-- Expand laterals
--------------------------------------
# execute: false
-select 2 AS d, d + 1 FROM x WHERE d = 2 GROUP BY d;
+SELECT 2 AS d, d + 1 FROM x WHERE d = 2 GROUP BY d;
SELECT 2 AS d, 2 + 1 AS _col_1 FROM x AS x WHERE 2 = 2 GROUP BY 1;
# title: expand alias reference
@@ -471,6 +480,16 @@ FROM (
);
SELECT _q_0.i AS i, _q_0.j AS j FROM (SELECT x.a + 1 AS i, x.a + 1 + 1 AS j FROM x AS x) AS _q_0;
+# title: wrap expanded alias to ensure operator precedence isn't broken
+# execute: false
+SELECT x.a + x.b AS f, f * x.b FROM x;
+SELECT x.a + x.b AS f, (x.a + x.b) * x.b AS _col_1 FROM x AS x;
+
+# title: no need to wrap expanded alias
+# execute: false
+SELECT x.a + x.b AS f, f, f + 5 FROM x;
+SELECT x.a + x.b AS f, x.a + x.b AS _col_1, x.a + x.b + 5 AS _col_2 FROM x AS x;
+
--------------------------------------
-- Wrapped tables / join constructs
--------------------------------------
diff --git a/tests/fixtures/optimizer/qualify_columns__invalid.sql b/tests/fixtures/optimizer/qualify_columns__invalid.sql
index f3d8b6a..09a9ddc 100644
--- a/tests/fixtures/optimizer/qualify_columns__invalid.sql
+++ b/tests/fixtures/optimizer/qualify_columns__invalid.sql
@@ -11,3 +11,4 @@ SELECT x.a FROM x JOIN y USING (a);
SELECT a, SUM(b) FROM x GROUP BY 3;
SELECT p FROM (SELECT x from xx) y CROSS JOIN yy CROSS JOIN zz
SELECT a FROM (SELECT * FROM x CROSS JOIN y);
+SELECT x FROM tbl AS tbl(a);
diff --git a/tests/test_expressions.py b/tests/test_expressions.py
index 1aab1c0..f68ced2 100644
--- a/tests/test_expressions.py
+++ b/tests/test_expressions.py
@@ -328,6 +328,10 @@ class TestExpressions(unittest.TestCase):
cte = expression.find(exp.CTE)
self.assertEqual(cte.alias_column_names, ["a", "b"])
+ expression = parse_one("SELECT * FROM tbl AS tbl(a, b)")
+ table = expression.find(exp.Table)
+ self.assertEqual(table.alias_column_names, ["a", "b"])
+
def test_ctes(self):
expression = parse_one("SELECT a FROM x")
self.assertEqual(expression.ctes, [])
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
index 64d7db7..3fe53e4 100644
--- a/tests/test_optimizer.py
+++ b/tests/test_optimizer.py
@@ -291,7 +291,7 @@ class TestOptimizer(unittest.TestCase):
# check order of lateral expansion with no schema
self.assertEqual(
optimizer.optimize("SELECT a + 1 AS d, d + 1 AS e FROM x WHERE e > 1 GROUP BY e").sql(),
- 'SELECT "x"."a" + 1 AS "d", "x"."a" + 1 + 1 AS "e" FROM "x" AS "x" WHERE "x"."a" + 2 > 1 GROUP BY "x"."a" + 1 + 1',
+ 'SELECT "x"."a" + 1 AS "d", "x"."a" + 1 + 1 AS "e" FROM "x" AS "x" WHERE ("x"."a" + 2) > 1 GROUP BY "x"."a" + 1 + 1',
)
self.assertEqual(
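
The updated expectation reflects that lateral alias expansion now parenthesizes the
substituted expression where operator precedence requires it. Reproducing the test's call
(no schema needed):

from sqlglot.optimizer import optimize

print(optimize("SELECT a + 1 AS d, d + 1 AS e FROM x WHERE e > 1 GROUP BY e").sql())
# SELECT "x"."a" + 1 AS "d", "x"."a" + 1 + 1 AS "e" FROM "x" AS "x" WHERE ("x"."a" + 2) > 1 GROUP BY "x"."a" + 1 + 1
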
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 027a9ca..e7b0ca9 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -219,6 +219,9 @@ class TestParser(unittest.TestCase):
with self.assertRaises(ParseError):
parse_one("WITH cte AS (SELECT * FROM x)")
+ with self.assertRaises(ParseError):
+ parse_one("SELECT foo( FROM bar")
+
self.assertEqual(
parse_one(
"CREATE TABLE t (i UInt8) ENGINE = AggregatingMergeTree() ORDER BY tuple()",
@@ -694,3 +697,14 @@ class TestParser(unittest.TestCase):
def test_parse_floats(self):
self.assertTrue(parse_one("1. ").is_number)
+
+ def test_parse_terse_coalesce(self):
+ self.assertIsNotNone(parse_one("SELECT x ?? y FROM z").find(exp.Coalesce))
+ self.assertEqual(
+ parse_one("SELECT a, b ?? 'No Data' FROM z").sql(),
+ "SELECT a, COALESCE(b, 'No Data') FROM z",
+ )
+ self.assertEqual(
+ parse_one("SELECT a, b ?? c ?? 'No Data' FROM z").sql(),
+ "SELECT a, COALESCE(COALESCE(b, c), 'No Data') FROM z",
+ )