author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-07-24 08:03:48 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-07-24 08:03:48 +0000
commit    36db14f4c6c28209371d563d76697df0172e337f (patch)
tree      527347be54a38fc9b14fe1fa04c27a4bbae44016 /tests/dialects
parent    Releasing debian version 17.4.1-1. (diff)
Merging upstream version 17.7.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/dialects')
-rw-r--r--  tests/dialects/test_bigquery.py     10
-rw-r--r--  tests/dialects/test_clickhouse.py   34
-rw-r--r--  tests/dialects/test_dialect.py      15
-rw-r--r--  tests/dialects/test_duckdb.py      325
-rw-r--r--  tests/dialects/test_hive.py         44
-rw-r--r--  tests/dialects/test_mysql.py        10
-rw-r--r--  tests/dialects/test_postgres.py      8
-rw-r--r--  tests/dialects/test_presto.py       10
-rw-r--r--  tests/dialects/test_redshift.py     15
-rw-r--r--  tests/dialects/test_snowflake.py    54
-rw-r--r--  tests/dialects/test_spark.py        76
-rw-r--r--  tests/dialects/test_teradata.py      3
-rw-r--r--  tests/dialects/test_tsql.py         75
13 files changed, 493 insertions, 186 deletions
diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 3939ba0..69042be 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -1,6 +1,6 @@
from unittest import mock
-from sqlglot import ErrorLevel, ParseError, UnsupportedError, transpile
+from sqlglot import ErrorLevel, ParseError, TokenError, UnsupportedError, transpile
from tests.dialects.test_dialect import Validator
@@ -8,7 +8,7 @@ class TestBigQuery(Validator):
dialect = "bigquery"
def test_bigquery(self):
- with self.assertRaises(ValueError):
+ with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")
# Reference: https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#set_operators
@@ -521,6 +521,12 @@ class TestBigQuery(Validator):
},
)
self.validate_all(
+ "SELECT * FROM UNNEST([STRUCT(1 AS id)]) CROSS JOIN UNNEST([STRUCT(1 AS id)])",
+ read={
+ "postgres": "SELECT * FROM (VALUES (1)) AS t1(id) CROSS JOIN (VALUES (1)) AS t2(id)",
+ },
+ )
+ self.validate_all(
"SELECT cola, colb, colc FROM (VALUES (1, 'test', NULL)) AS tab(cola, colb, colc)",
write={
"spark": "SELECT cola, colb, colc FROM VALUES (1, 'test', NULL) AS tab(cola, colb, colc)",
diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index b0df4df..bc82645 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -6,6 +6,7 @@ class TestClickhouse(Validator):
dialect = "clickhouse"
def test_clickhouse(self):
+ self.validate_identity("SELECT xor(TRUE, FALSE)")
self.validate_identity("ATTACH DATABASE DEFAULT ENGINE = ORDINARY")
self.validate_identity("CAST(['hello'], 'Array(Enum8(''hello'' = 1))')")
self.validate_identity("SELECT x, COUNT() FROM y GROUP BY x WITH TOTALS")
@@ -53,6 +54,34 @@ class TestClickhouse(Validator):
)
self.validate_all(
+ "SELECT xor(1, 0)",
+ read={
+ "clickhouse": "SELECT xor(1, 0)",
+ "mysql": "SELECT 1 XOR 0",
+ },
+ write={
+ "mysql": "SELECT 1 XOR 0",
+ },
+ )
+ self.validate_all(
+ "SELECT xor(0, 1, xor(1, 0, 0))",
+ write={
+ "clickhouse": "SELECT xor(0, 1, xor(1, 0, 0))",
+ "mysql": "SELECT 0 XOR 1 XOR 1 XOR 0 XOR 0",
+ },
+ )
+ self.validate_all(
+ "SELECT xor(xor(1, 0), 1)",
+ read={
+ "clickhouse": "SELECT xor(xor(1, 0), 1)",
+ "mysql": "SELECT 1 XOR 0 XOR 1",
+ },
+ write={
+ "clickhouse": "SELECT xor(xor(1, 0), 1)",
+ "mysql": "SELECT 1 XOR 0 XOR 1",
+ },
+ )
+ self.validate_all(
"CONCAT(CASE WHEN COALESCE(CAST(a AS TEXT), '') IS NULL THEN COALESCE(CAST(a AS TEXT), '') ELSE CAST(COALESCE(CAST(a AS TEXT), '') AS TEXT) END, CASE WHEN COALESCE(CAST(b AS TEXT), '') IS NULL THEN COALESCE(CAST(b AS TEXT), '') ELSE CAST(COALESCE(CAST(b AS TEXT), '') AS TEXT) END)",
read={"postgres": "CONCAT(a, b)"},
)
@@ -137,6 +166,11 @@ class TestClickhouse(Validator):
+ " GROUP BY loyalty ORDER BY loyalty"
},
)
+ self.validate_identity("SELECT s, arr FROM arrays_test ARRAY JOIN arr")
+ self.validate_identity("SELECT s, arr, a FROM arrays_test LEFT ARRAY JOIN arr AS a")
+ self.validate_identity(
+ "SELECT s, arr_external FROM arrays_test ARRAY JOIN [1, 2, 3] AS arr_external"
+ )
def test_cte(self):
self.validate_identity("WITH 'x' AS foo SELECT foo")
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 618b1b4..aaaffab 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -81,7 +81,7 @@ class TestDialect(Validator):
"clickhouse": "CAST(a AS TEXT)",
"drill": "CAST(a AS VARCHAR)",
"duckdb": "CAST(a AS TEXT)",
- "mysql": "CAST(a AS TEXT)",
+ "mysql": "CAST(a AS CHAR)",
"hive": "CAST(a AS STRING)",
"oracle": "CAST(a AS CLOB)",
"postgres": "CAST(a AS TEXT)",
@@ -160,7 +160,7 @@ class TestDialect(Validator):
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR)",
"duckdb": "CAST(a AS TEXT)",
- "mysql": "CAST(a AS TEXT)",
+ "mysql": "CAST(a AS CHAR)",
"hive": "CAST(a AS STRING)",
"oracle": "CAST(a AS CLOB)",
"postgres": "CAST(a AS TEXT)",
@@ -177,7 +177,7 @@ class TestDialect(Validator):
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR)",
"duckdb": "CAST(a AS TEXT)",
- "mysql": "CAST(a AS VARCHAR)",
+ "mysql": "CAST(a AS CHAR)",
"hive": "CAST(a AS STRING)",
"oracle": "CAST(a AS VARCHAR2)",
"postgres": "CAST(a AS VARCHAR)",
@@ -194,7 +194,7 @@ class TestDialect(Validator):
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR(3))",
"duckdb": "CAST(a AS TEXT(3))",
- "mysql": "CAST(a AS VARCHAR(3))",
+ "mysql": "CAST(a AS CHAR(3))",
"hive": "CAST(a AS VARCHAR(3))",
"oracle": "CAST(a AS VARCHAR2(3))",
"postgres": "CAST(a AS VARCHAR(3))",
@@ -224,13 +224,13 @@ class TestDialect(Validator):
},
)
self.validate_all(
- "TRY_CAST(a AS DOUBLE)",
+ "CAST(a AS DOUBLE)",
read={
"postgres": "CAST(a AS DOUBLE PRECISION)",
"redshift": "CAST(a AS DOUBLE PRECISION)",
},
write={
- "duckdb": "TRY_CAST(a AS DOUBLE)",
+ "duckdb": "CAST(a AS DOUBLE)",
"drill": "CAST(a AS DOUBLE)",
"postgres": "CAST(a AS DOUBLE PRECISION)",
"redshift": "CAST(a AS DOUBLE PRECISION)",
@@ -634,7 +634,7 @@ class TestDialect(Validator):
},
)
self.validate_all(
- "TIMESTAMP_TRUNC(TRY_CAST(x AS DATE), day)",
+ "TIMESTAMP_TRUNC(CAST(x AS DATE), day)",
read={"postgres": "DATE_TRUNC('day', x::DATE)"},
)
self.validate_all(
@@ -1512,6 +1512,7 @@ SELECT
"redshift": "BEGIN",
"snowflake": "BEGIN",
"sqlite": "BEGIN TRANSACTION",
+ "tsql": "BEGIN TRANSACTION",
},
)
self.validate_all(
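
The updated expectations above reflect two generator changes; a sketch (not part of the patch):

    from sqlglot import transpile

    # MySQL's CAST only accepts CHAR for character targets, so TEXT and
    # VARCHAR sources are now emitted as CHAR
    print(transpile("CAST(a AS TEXT)", write="mysql")[0])
    # CAST(a AS CHAR)

    # Postgres DOUBLE PRECISION casts no longer degrade to TRY_CAST in DuckDB
    print(transpile("CAST(a AS DOUBLE PRECISION)", read="postgres", write="duckdb")[0])
    # CAST(a AS DOUBLE)
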
diff --git a/tests/dialects/test_duckdb.py b/tests/dialects/test_duckdb.py
index 336f47d..5284700 100644
--- a/tests/dialects/test_duckdb.py
+++ b/tests/dialects/test_duckdb.py
@@ -5,156 +5,9 @@ from tests.dialects.test_dialect import Validator
class TestDuckDB(Validator):
dialect = "duckdb"
- def test_time(self):
- self.validate_identity("SELECT CURRENT_DATE")
- self.validate_identity("SELECT CURRENT_TIMESTAMP")
-
- self.validate_all(
- "SELECT MAKE_DATE(2016, 12, 25)", read={"bigquery": "SELECT DATE(2016, 12, 25)"}
- )
- self.validate_all(
- "SELECT CAST(CAST('2016-12-25 23:59:59' AS DATETIME) AS DATE)",
- read={"bigquery": "SELECT DATE(DATETIME '2016-12-25 23:59:59')"},
- )
- self.validate_all(
- "SELECT STRPTIME(STRFTIME(CAST(CAST('2016-12-25' AS TIMESTAMPTZ) AS DATE), '%d/%m/%Y') || ' ' || 'America/Los_Angeles', '%d/%m/%Y %Z')",
- read={
- "bigquery": "SELECT DATE(TIMESTAMP '2016-12-25', 'America/Los_Angeles')",
- },
- )
- self.validate_all(
- "SELECT CAST(CAST(STRPTIME('05/06/2020', '%m/%d/%Y') AS DATE) AS DATE)",
- read={"bigquery": "SELECT DATE(PARSE_DATE('%m/%d/%Y', '05/06/2020'))"},
- )
- self.validate_all(
- "SELECT CAST('2020-01-01' AS DATE) + INTERVAL (-1) DAY",
- read={"mysql": "SELECT DATE '2020-01-01' + INTERVAL -1 DAY"},
- )
- self.validate_all(
- "SELECT INTERVAL '1 quarter'",
- write={"duckdb": "SELECT (90 * INTERVAL '1' day)"},
- )
- self.validate_all(
- "SELECT ((DATE_TRUNC('DAY', CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP) + INTERVAL (0 - MOD((DAYOFWEEK(CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)) day) + (7 * INTERVAL (-5) day))) AS t1",
- read={
- "presto": "SELECT ((DATE_ADD('week', -5, DATE_TRUNC('DAY', DATE_ADD('day', (0 - MOD((DAY_OF_WEEK(CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)), CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)))))) AS t1",
- },
- )
- self.validate_all(
- "EPOCH(x)",
- read={
- "presto": "TO_UNIXTIME(x)",
- },
- write={
- "bigquery": "TIME_TO_UNIX(x)",
- "duckdb": "EPOCH(x)",
- "presto": "TO_UNIXTIME(x)",
- "spark": "UNIX_TIMESTAMP(x)",
- },
- )
- self.validate_all(
- "EPOCH_MS(x)",
- write={
- "bigquery": "UNIX_TO_TIME(x / 1000)",
- "duckdb": "TO_TIMESTAMP(x / 1000)",
- "presto": "FROM_UNIXTIME(x / 1000)",
- "spark": "CAST(FROM_UNIXTIME(x / 1000) AS TIMESTAMP)",
- },
- )
- self.validate_all(
- "STRFTIME(x, '%y-%-m-%S')",
- write={
- "bigquery": "TIME_TO_STR(x, '%y-%-m-%S')",
- "duckdb": "STRFTIME(x, '%y-%-m-%S')",
- "postgres": "TO_CHAR(x, 'YY-FMMM-SS')",
- "presto": "DATE_FORMAT(x, '%y-%c-%S')",
- "spark": "DATE_FORMAT(x, 'yy-M-ss')",
- },
- )
- self.validate_all(
- "STRFTIME(x, '%Y-%m-%d %H:%M:%S')",
- write={
- "duckdb": "STRFTIME(x, '%Y-%m-%d %H:%M:%S')",
- "presto": "DATE_FORMAT(x, '%Y-%m-%d %T')",
- "hive": "DATE_FORMAT(x, 'yyyy-MM-dd HH:mm:ss')",
- },
- )
- self.validate_all(
- "STRPTIME(x, '%y-%-m')",
- write={
- "bigquery": "PARSE_TIMESTAMP('%y-%-m', x)",
- "duckdb": "STRPTIME(x, '%y-%-m')",
- "presto": "DATE_PARSE(x, '%y-%c')",
- "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'yy-M')) AS TIMESTAMP)",
- "spark": "TO_TIMESTAMP(x, 'yy-M')",
- },
- )
- self.validate_all(
- "TO_TIMESTAMP(x)",
- write={
- "bigquery": "UNIX_TO_TIME(x)",
- "duckdb": "TO_TIMESTAMP(x)",
- "presto": "FROM_UNIXTIME(x)",
- "hive": "FROM_UNIXTIME(x)",
- },
- )
- self.validate_all(
- "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')",
- write={
- "bigquery": "PARSE_TIMESTAMP('%-m/%-d/%y %-I:%M %p', x)",
- "duckdb": "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')",
- "presto": "DATE_PARSE(x, '%c/%e/%y %l:%i %p')",
- "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'M/d/yy h:mm a')) AS TIMESTAMP)",
- "spark": "TO_TIMESTAMP(x, 'M/d/yy h:mm a')",
- },
- )
- self.validate_all(
- "CAST(start AS TIMESTAMPTZ) AT TIME ZONE 'America/New_York'",
- read={
- "snowflake": "CONVERT_TIMEZONE('America/New_York', CAST(start AS TIMESTAMPTZ))",
- },
- write={
- "bigquery": "TIMESTAMP(DATETIME(CAST(start AS TIMESTAMP), 'America/New_York'))",
- "duckdb": "CAST(start AS TIMESTAMPTZ) AT TIME ZONE 'America/New_York'",
- "snowflake": "CONVERT_TIMEZONE('America/New_York', CAST(start AS TIMESTAMPTZ))",
- },
- )
-
- def test_sample(self):
- self.validate_all(
- "SELECT * FROM tbl USING SAMPLE 5",
- write={"duckdb": "SELECT * FROM tbl USING SAMPLE (5)"},
- )
- self.validate_all(
- "SELECT * FROM tbl USING SAMPLE 10%",
- write={"duckdb": "SELECT * FROM tbl USING SAMPLE (10 PERCENT)"},
- )
- self.validate_all(
- "SELECT * FROM tbl USING SAMPLE 10 PERCENT (bernoulli)",
- write={"duckdb": "SELECT * FROM tbl USING SAMPLE BERNOULLI (10 PERCENT)"},
- )
- self.validate_all(
- "SELECT * FROM tbl USING SAMPLE reservoir(50 ROWS) REPEATABLE (100)",
- write={"duckdb": "SELECT * FROM tbl USING SAMPLE RESERVOIR (50 ROWS) REPEATABLE (100)"},
- )
- self.validate_all(
- "SELECT * FROM tbl USING SAMPLE 10% (system, 377)",
- write={"duckdb": "SELECT * FROM tbl USING SAMPLE SYSTEM (10 PERCENT) REPEATABLE (377)"},
- )
- self.validate_all(
- "SELECT * FROM tbl TABLESAMPLE RESERVOIR(20%), tbl2 WHERE tbl.i=tbl2.i",
- write={
- "duckdb": "SELECT * FROM tbl TABLESAMPLE RESERVOIR (20 PERCENT), tbl2 WHERE tbl.i = tbl2.i"
- },
- )
- self.validate_all(
- "SELECT * FROM tbl, tbl2 WHERE tbl.i=tbl2.i USING SAMPLE RESERVOIR(20%)",
- write={
- "duckdb": "SELECT * FROM tbl, tbl2 WHERE tbl.i = tbl2.i USING SAMPLE RESERVOIR (20 PERCENT)"
- },
- )
-
def test_duckdb(self):
+ self.validate_identity("SELECT SUM(x) FILTER (x = 1)", "SELECT SUM(x) FILTER(WHERE x = 1)")
+
# https://github.com/duckdb/duckdb/releases/tag/v0.8.0
self.assertEqual(
parse_one("a / b", read="duckdb").assert_is(exp.Div).sql(dialect="duckdb"), "a / b"
@@ -204,6 +57,31 @@ class TestDuckDB(Validator):
self.validate_all("x ~ y", write={"duckdb": "REGEXP_MATCHES(x, y)"})
self.validate_all("SELECT * FROM 'x.y'", write={"duckdb": 'SELECT * FROM "x.y"'})
self.validate_all(
+ "DATE_DIFF('day', CAST(b AS DATE), CAST(a AS DATE))",
+ read={
+ "duckdb": "DATE_DIFF('day', CAST(b AS DATE), CAST(a AS DATE))",
+ "hive": "DATEDIFF(a, b)",
+ "spark": "DATEDIFF(a, b)",
+ "spark2": "DATEDIFF(a, b)",
+ },
+ )
+ self.validate_all(
+ "XOR(a, b)",
+ read={
+ "": "a ^ b",
+ "bigquery": "a ^ b",
+ "presto": "BITWISE_XOR(a, b)",
+ "postgres": "a # b",
+ },
+ write={
+ "": "a ^ b",
+ "bigquery": "a ^ b",
+ "duckdb": "XOR(a, b)",
+ "presto": "BITWISE_XOR(a, b)",
+ "postgres": "a # b",
+ },
+ )
+ self.validate_all(
"PIVOT_WIDER Cities ON Year USING SUM(Population)",
write={"duckdb": "PIVOT Cities ON Year USING SUM(Population)"},
)
@@ -459,6 +337,155 @@ class TestDuckDB(Validator):
unsupported_level=ErrorLevel.IMMEDIATE,
)
+ def test_time(self):
+ self.validate_identity("SELECT CURRENT_DATE")
+ self.validate_identity("SELECT CURRENT_TIMESTAMP")
+
+ self.validate_all(
+ "SELECT MAKE_DATE(2016, 12, 25)", read={"bigquery": "SELECT DATE(2016, 12, 25)"}
+ )
+ self.validate_all(
+ "SELECT CAST(CAST('2016-12-25 23:59:59' AS DATETIME) AS DATE)",
+ read={"bigquery": "SELECT DATE(DATETIME '2016-12-25 23:59:59')"},
+ )
+ self.validate_all(
+ "SELECT STRPTIME(STRFTIME(CAST(CAST('2016-12-25' AS TIMESTAMPTZ) AS DATE), '%d/%m/%Y') || ' ' || 'America/Los_Angeles', '%d/%m/%Y %Z')",
+ read={
+ "bigquery": "SELECT DATE(TIMESTAMP '2016-12-25', 'America/Los_Angeles')",
+ },
+ )
+ self.validate_all(
+ "SELECT CAST(CAST(STRPTIME('05/06/2020', '%m/%d/%Y') AS DATE) AS DATE)",
+ read={"bigquery": "SELECT DATE(PARSE_DATE('%m/%d/%Y', '05/06/2020'))"},
+ )
+ self.validate_all(
+ "SELECT CAST('2020-01-01' AS DATE) + INTERVAL (-1) DAY",
+ read={"mysql": "SELECT DATE '2020-01-01' + INTERVAL -1 DAY"},
+ )
+ self.validate_all(
+ "SELECT INTERVAL '1 quarter'",
+ write={"duckdb": "SELECT (90 * INTERVAL '1' day)"},
+ )
+ self.validate_all(
+ "SELECT ((DATE_TRUNC('DAY', CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP) + INTERVAL (0 - MOD((DAYOFWEEK(CAST(CAST(DATE_TRUNC('DAY', CURRENT_TIMESTAMP) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)) day) + (7 * INTERVAL (-5) day))) AS t1",
+ read={
+ "presto": "SELECT ((DATE_ADD('week', -5, DATE_TRUNC('DAY', DATE_ADD('day', (0 - MOD((DAY_OF_WEEK(CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)) % 7) - 1 + 7, 7)), CAST(CAST(DATE_TRUNC('DAY', NOW()) AS DATE) AS TIMESTAMP)))))) AS t1",
+ },
+ )
+ self.validate_all(
+ "EPOCH(x)",
+ read={
+ "presto": "TO_UNIXTIME(x)",
+ },
+ write={
+ "bigquery": "TIME_TO_UNIX(x)",
+ "duckdb": "EPOCH(x)",
+ "presto": "TO_UNIXTIME(x)",
+ "spark": "UNIX_TIMESTAMP(x)",
+ },
+ )
+ self.validate_all(
+ "EPOCH_MS(x)",
+ write={
+ "bigquery": "UNIX_TO_TIME(x / 1000)",
+ "duckdb": "TO_TIMESTAMP(x / 1000)",
+ "presto": "FROM_UNIXTIME(x / 1000)",
+ "spark": "CAST(FROM_UNIXTIME(x / 1000) AS TIMESTAMP)",
+ },
+ )
+ self.validate_all(
+ "STRFTIME(x, '%y-%-m-%S')",
+ write={
+ "bigquery": "TIME_TO_STR(x, '%y-%-m-%S')",
+ "duckdb": "STRFTIME(x, '%y-%-m-%S')",
+ "postgres": "TO_CHAR(x, 'YY-FMMM-SS')",
+ "presto": "DATE_FORMAT(x, '%y-%c-%S')",
+ "spark": "DATE_FORMAT(x, 'yy-M-ss')",
+ },
+ )
+ self.validate_all(
+ "STRFTIME(x, '%Y-%m-%d %H:%M:%S')",
+ write={
+ "duckdb": "STRFTIME(x, '%Y-%m-%d %H:%M:%S')",
+ "presto": "DATE_FORMAT(x, '%Y-%m-%d %T')",
+ "hive": "DATE_FORMAT(x, 'yyyy-MM-dd HH:mm:ss')",
+ },
+ )
+ self.validate_all(
+ "STRPTIME(x, '%y-%-m')",
+ write={
+ "bigquery": "PARSE_TIMESTAMP('%y-%-m', x)",
+ "duckdb": "STRPTIME(x, '%y-%-m')",
+ "presto": "DATE_PARSE(x, '%y-%c')",
+ "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'yy-M')) AS TIMESTAMP)",
+ "spark": "TO_TIMESTAMP(x, 'yy-M')",
+ },
+ )
+ self.validate_all(
+ "TO_TIMESTAMP(x)",
+ write={
+ "bigquery": "UNIX_TO_TIME(x)",
+ "duckdb": "TO_TIMESTAMP(x)",
+ "presto": "FROM_UNIXTIME(x)",
+ "hive": "FROM_UNIXTIME(x)",
+ },
+ )
+ self.validate_all(
+ "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')",
+ write={
+ "bigquery": "PARSE_TIMESTAMP('%-m/%-d/%y %-I:%M %p', x)",
+ "duckdb": "STRPTIME(x, '%-m/%-d/%y %-I:%M %p')",
+ "presto": "DATE_PARSE(x, '%c/%e/%y %l:%i %p')",
+ "hive": "CAST(FROM_UNIXTIME(UNIX_TIMESTAMP(x, 'M/d/yy h:mm a')) AS TIMESTAMP)",
+ "spark": "TO_TIMESTAMP(x, 'M/d/yy h:mm a')",
+ },
+ )
+ self.validate_all(
+ "CAST(start AS TIMESTAMPTZ) AT TIME ZONE 'America/New_York'",
+ read={
+ "snowflake": "CONVERT_TIMEZONE('America/New_York', CAST(start AS TIMESTAMPTZ))",
+ },
+ write={
+ "bigquery": "TIMESTAMP(DATETIME(CAST(start AS TIMESTAMP), 'America/New_York'))",
+ "duckdb": "CAST(start AS TIMESTAMPTZ) AT TIME ZONE 'America/New_York'",
+ "snowflake": "CONVERT_TIMEZONE('America/New_York', CAST(start AS TIMESTAMPTZ))",
+ },
+ )
+
+ def test_sample(self):
+ self.validate_all(
+ "SELECT * FROM tbl USING SAMPLE 5",
+ write={"duckdb": "SELECT * FROM tbl USING SAMPLE (5)"},
+ )
+ self.validate_all(
+ "SELECT * FROM tbl USING SAMPLE 10%",
+ write={"duckdb": "SELECT * FROM tbl USING SAMPLE (10 PERCENT)"},
+ )
+ self.validate_all(
+ "SELECT * FROM tbl USING SAMPLE 10 PERCENT (bernoulli)",
+ write={"duckdb": "SELECT * FROM tbl USING SAMPLE BERNOULLI (10 PERCENT)"},
+ )
+ self.validate_all(
+ "SELECT * FROM tbl USING SAMPLE reservoir(50 ROWS) REPEATABLE (100)",
+ write={"duckdb": "SELECT * FROM tbl USING SAMPLE RESERVOIR (50 ROWS) REPEATABLE (100)"},
+ )
+ self.validate_all(
+ "SELECT * FROM tbl USING SAMPLE 10% (system, 377)",
+ write={"duckdb": "SELECT * FROM tbl USING SAMPLE SYSTEM (10 PERCENT) REPEATABLE (377)"},
+ )
+ self.validate_all(
+ "SELECT * FROM tbl TABLESAMPLE RESERVOIR(20%), tbl2 WHERE tbl.i=tbl2.i",
+ write={
+ "duckdb": "SELECT * FROM tbl TABLESAMPLE RESERVOIR (20 PERCENT), tbl2 WHERE tbl.i = tbl2.i"
+ },
+ )
+ self.validate_all(
+ "SELECT * FROM tbl, tbl2 WHERE tbl.i=tbl2.i USING SAMPLE RESERVOIR(20%)",
+ write={
+ "duckdb": "SELECT * FROM tbl, tbl2 WHERE tbl.i = tbl2.i USING SAMPLE RESERVOIR (20 PERCENT)"
+ },
+ )
+
def test_array(self):
self.validate_identity("ARRAY(SELECT id FROM t)")
self.validate_identity("ARRAY((SELECT id FROM t))")
diff --git a/tests/dialects/test_hive.py b/tests/dialects/test_hive.py
index 0503f6a..6dd484f 100644
--- a/tests/dialects/test_hive.py
+++ b/tests/dialects/test_hive.py
@@ -7,28 +7,57 @@ class TestHive(Validator):
def test_bits(self):
self.validate_all(
"x & 1",
- write={
+ read={
"duckdb": "x & 1",
"presto": "BITWISE_AND(x, 1)",
+ "spark": "x & 1",
+ },
+ write={
+ "duckdb": "x & 1",
"hive": "x & 1",
+ "presto": "BITWISE_AND(x, 1)",
"spark": "x & 1",
},
)
self.validate_all(
- "~x",
+ "x & 1 > 0",
+ read={
+ "duckdb": "x & 1 > 0",
+ "presto": "BITWISE_AND(x, 1) > 0",
+ "spark": "x & 1 > 0",
+ },
write={
+ "duckdb": "x & 1 > 0",
+ "presto": "BITWISE_AND(x, 1) > 0",
+ "hive": "x & 1 > 0",
+ "spark": "x & 1 > 0",
+ },
+ )
+ self.validate_all(
+ "~x",
+ read={
"duckdb": "~x",
"presto": "BITWISE_NOT(x)",
+ "spark": "~x",
+ },
+ write={
+ "duckdb": "~x",
"hive": "~x",
+ "presto": "BITWISE_NOT(x)",
"spark": "~x",
},
)
self.validate_all(
"x | 1",
- write={
+ read={
"duckdb": "x | 1",
"presto": "BITWISE_OR(x, 1)",
+ "spark": "x | 1",
+ },
+ write={
+ "duckdb": "x | 1",
"hive": "x | 1",
+ "presto": "BITWISE_OR(x, 1)",
"spark": "x | 1",
},
)
@@ -56,15 +85,6 @@ class TestHive(Validator):
"spark": "SHIFTRIGHT(x, 1)",
},
)
- self.validate_all(
- "x & 1 > 0",
- write={
- "duckdb": "x & 1 > 0",
- "presto": "BITWISE_AND(x, 1) > 0",
- "hive": "x & 1 > 0",
- "spark": "x & 1 > 0",
- },
- )
def test_cast(self):
self.validate_all(
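
The bit-operator tests now validate both directions; a sketch (not part of the patch):

    from sqlglot import transpile

    # Presto's BITWISE_* functions now also parse back into Hive operators
    print(transpile("BITWISE_AND(x, 1)", read="presto", write="hive")[0])  # x & 1
    print(transpile("BITWISE_NOT(x)", read="presto", write="hive")[0])     # ~x
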
diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 70ffcd9..ae2fa41 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -83,7 +83,7 @@ class TestMySQL(Validator):
self.validate_identity("CAST(x AS SET('a', 'b'))")
self.validate_identity("SELECT CURRENT_TIMESTAMP(6)")
self.validate_identity("x ->> '$.name'")
- self.validate_identity("SELECT CAST(`a`.`b` AS INT) FROM foo")
+ self.validate_identity("SELECT CAST(`a`.`b` AS CHAR) FROM foo")
self.validate_identity("SELECT TRIM(LEADING 'bla' FROM ' XXX ')")
self.validate_identity("SELECT TRIM(TRAILING 'bla' FROM ' XXX ')")
self.validate_identity("SELECT TRIM(BOTH 'bla' FROM ' XXX ')")
@@ -436,6 +436,14 @@ class TestMySQL(Validator):
self.validate_identity("TIME_STR_TO_UNIX(x)", "UNIX_TIMESTAMP(x)")
def test_mysql(self):
+ self.validate_all(
+ "CAST(x AS TEXT)",
+ write={
+ "mysql": "CAST(x AS CHAR)",
+ "presto": "CAST(x AS VARCHAR)",
+ "starrocks": "CAST(x AS STRING)",
+ },
+ )
self.validate_all("CAST(x AS SIGNED)", write={"mysql": "CAST(x AS SIGNED)"})
self.validate_all("CAST(x AS SIGNED INTEGER)", write={"mysql": "CAST(x AS SIGNED)"})
self.validate_all("CAST(x AS UNSIGNED)", write={"mysql": "CAST(x AS UNSIGNED)"})
diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 9fc18b7..b35665b 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -255,8 +255,8 @@ class TestPostgres(Validator):
"GENERATE_SERIES('2019-01-01'::TIMESTAMP, NOW(), '1day')",
write={
"postgres": "GENERATE_SERIES(CAST('2019-01-01' AS TIMESTAMP), CURRENT_TIMESTAMP, INTERVAL '1 day')",
- "presto": "SEQUENCE(TRY_CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
- "trino": "SEQUENCE(TRY_CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
+ "presto": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
+ "trino": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' day)",
},
)
self.validate_all(
@@ -470,7 +470,7 @@ class TestPostgres(Validator):
"""SELECT JSON_ARRAY_ELEMENTS((foo->'sections')::JSON) AS sections""",
write={
"postgres": """SELECT JSON_ARRAY_ELEMENTS(CAST((foo -> 'sections') AS JSON)) AS sections""",
- "presto": """SELECT JSON_ARRAY_ELEMENTS(TRY_CAST((JSON_EXTRACT(foo, 'sections')) AS JSON)) AS sections""",
+ "presto": """SELECT JSON_ARRAY_ELEMENTS(CAST((JSON_EXTRACT(foo, 'sections')) AS JSON)) AS sections""",
},
)
self.validate_all(
@@ -570,7 +570,7 @@ class TestPostgres(Validator):
},
)
- self.assertIsInstance(parse_one("id::UUID", read="postgres"), exp.TryCast)
+ self.assertIsInstance(parse_one("id::UUID", read="postgres"), exp.Cast)
def test_bool_or(self):
self.validate_all(
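
The TRY_CAST removals follow from the parser change asserted at the end of the hunk; a sketch (not part of the patch):

    from sqlglot import exp, parse_one

    # Postgres :: casts now parse as plain exp.Cast, so downstream dialects
    # such as Presto and Trino render CAST instead of TRY_CAST
    assert isinstance(parse_one("id::UUID", read="postgres"), exp.Cast)
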
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index ddfa9e8..c0b77a3 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -9,6 +9,14 @@ class TestPresto(Validator):
def test_cast(self):
self.validate_all(
+ "SELECT CAST('10C' AS INTEGER)",
+ read={
+ "postgres": "SELECT CAST('10C' AS INTEGER)",
+ "presto": "SELECT CAST('10C' AS INTEGER)",
+ "redshift": "SELECT CAST('10C' AS INTEGER)",
+ },
+ )
+ self.validate_all(
"SELECT DATE_DIFF('week', CAST(CAST('2009-01-01' AS TIMESTAMP) AS DATE), CAST(CAST('2009-12-31' AS TIMESTAMP) AS DATE))",
read={"redshift": "SELECT DATEDIFF(week, '2009-01-01', '2009-12-31')"},
)
@@ -17,7 +25,7 @@ class TestPresto(Validator):
read={"redshift": "SELECT DATEADD(month, 18, '2008-02-28')"},
)
self.validate_all(
- "SELECT TRY_CAST('1970-01-01 00:00:00' AS TIMESTAMP)",
+ "SELECT CAST('1970-01-01 00:00:00' AS TIMESTAMP)",
read={"postgres": "SELECT 'epoch'::TIMESTAMP"},
)
self.validate_all(
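
A sketch (not part of the patch) of the epoch-literal expansion with the new CAST output:

    from sqlglot import transpile

    # Postgres's 'epoch' timestamp literal expands to an explicit CAST
    print(transpile("SELECT 'epoch'::TIMESTAMP", read="postgres", write="presto")[0])
    # SELECT CAST('1970-01-01 00:00:00' AS TIMESTAMP)
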
diff --git a/tests/dialects/test_redshift.py b/tests/dialects/test_redshift.py
index 620aae2..90c953f 100644
--- a/tests/dialects/test_redshift.py
+++ b/tests/dialects/test_redshift.py
@@ -11,6 +11,13 @@ class TestRedshift(Validator):
self.validate_identity("$foo")
self.validate_all(
+ "SELECT ADD_MONTHS('2008-03-31', 1)",
+ write={
+ "redshift": "SELECT DATEADD(month, 1, '2008-03-31')",
+ "trino": "SELECT DATE_ADD('month', 1, CAST(CAST('2008-03-31' AS TIMESTAMP) AS DATE))",
+ },
+ )
+ self.validate_all(
"SELECT STRTOL('abc', 16)",
read={
"trino": "SELECT FROM_BASE('abc', 16)",
@@ -104,7 +111,7 @@ class TestRedshift(Validator):
"SELECT ST_AsEWKT(ST_GeomFromEWKT('SRID=4326;POINT(10 20)')::geography)",
write={
"redshift": "SELECT ST_ASEWKT(CAST(ST_GEOMFROMEWKT('SRID=4326;POINT(10 20)') AS GEOGRAPHY))",
- "bigquery": "SELECT ST_AsEWKT(SAFE_CAST(ST_GeomFromEWKT('SRID=4326;POINT(10 20)') AS GEOGRAPHY))",
+ "bigquery": "SELECT ST_AsEWKT(CAST(ST_GeomFromEWKT('SRID=4326;POINT(10 20)') AS GEOGRAPHY))",
},
)
self.validate_all(
@@ -222,7 +229,11 @@ class TestRedshift(Validator):
def test_values(self):
self.validate_all(
"SELECT * FROM (VALUES (1, 2)) AS t",
- write={"redshift": "SELECT * FROM (SELECT 1, 2) AS t"},
+ write={
+ "redshift": "SELECT * FROM (SELECT 1, 2) AS t",
+ "mysql": "SELECT * FROM (SELECT 1, 2) AS t",
+ "presto": "SELECT * FROM (VALUES (1, 2)) AS t",
+ },
)
self.validate_all(
"SELECT * FROM (VALUES (1)) AS t1(id) CROSS JOIN (VALUES (1)) AS t2(id)",
diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index e20045b..82762e8 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -386,7 +386,9 @@ class TestSnowflake(Validator):
self.validate_all(
"SELECT RLIKE(a, b)",
write={
+ "hive": "SELECT a RLIKE b",
"snowflake": "SELECT REGEXP_LIKE(a, b)",
+ "spark": "SELECT a RLIKE b",
},
)
self.validate_all(
@@ -913,6 +915,58 @@ FROM persons AS p, LATERAL FLATTEN(input => p.c, path => 'contact') AS f, LATERA
},
)
+ @mock.patch("sqlglot.generator.logger")
+ def test_regexp_replace(self, logger):
+ self.validate_all(
+ "REGEXP_REPLACE(subject, pattern)",
+ write={
+ "bigquery": "REGEXP_REPLACE(subject, pattern, '')",
+ "duckdb": "REGEXP_REPLACE(subject, pattern, '')",
+ "hive": "REGEXP_REPLACE(subject, pattern, '')",
+ "snowflake": "REGEXP_REPLACE(subject, pattern, '')",
+ "spark": "REGEXP_REPLACE(subject, pattern, '')",
+ },
+ )
+ self.validate_all(
+ "REGEXP_REPLACE(subject, pattern, replacement)",
+ read={
+ "bigquery": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "duckdb": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "hive": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "spark": "REGEXP_REPLACE(subject, pattern, replacement)",
+ },
+ write={
+ "bigquery": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "duckdb": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "hive": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "snowflake": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "spark": "REGEXP_REPLACE(subject, pattern, replacement)",
+ },
+ )
+ self.validate_all(
+ "REGEXP_REPLACE(subject, pattern, replacement, position)",
+ read={
+ "spark": "REGEXP_REPLACE(subject, pattern, replacement, position)",
+ },
+ write={
+ "bigquery": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "duckdb": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "hive": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "snowflake": "REGEXP_REPLACE(subject, pattern, replacement, position)",
+ "spark": "REGEXP_REPLACE(subject, pattern, replacement, position)",
+ },
+ )
+ self.validate_all(
+ "REGEXP_REPLACE(subject, pattern, replacement, position, occurrence, parameters)",
+ write={
+ "bigquery": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "duckdb": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "hive": "REGEXP_REPLACE(subject, pattern, replacement)",
+ "snowflake": "REGEXP_REPLACE(subject, pattern, replacement, position, occurrence, parameters)",
+ "spark": "REGEXP_REPLACE(subject, pattern, replacement, position)",
+ },
+ )
+
def test_match_recognize(self):
for row in (
"ONE ROW PER MATCH",
diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 32be23e..2afa868 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -224,6 +224,9 @@ TBLPROPERTIES (
)
def test_spark(self):
+ self.validate_identity("SELECT TRANSFORM(ARRAY(1, 2, 3), x -> x + 1)")
+ self.validate_identity("SELECT TRANSFORM(ARRAY(1, 2, 3), (x, i) -> x + i)")
+ self.validate_identity("REFRESH table a.b.c")
self.validate_identity("INTERVAL -86 days")
self.validate_identity("SELECT UNIX_TIMESTAMP()")
self.validate_identity("TRIM(' SparkSQL ')")
@@ -233,6 +236,61 @@ TBLPROPERTIES (
self.validate_identity("SPLIT(str, pattern, lim)")
self.validate_all(
+ "SELECT DATEDIFF(month, CAST('1996-10-30' AS TIMESTAMP), CAST('1997-02-28 10:30:00' AS TIMESTAMP))",
+ read={
+ "duckdb": "SELECT DATEDIFF('month', CAST('1996-10-30' AS TIMESTAMP), CAST('1997-02-28 10:30:00' AS TIMESTAMP))",
+ },
+ write={
+ "spark": "SELECT DATEDIFF(month, TO_DATE(CAST('1996-10-30' AS TIMESTAMP)), TO_DATE(CAST('1997-02-28 10:30:00' AS TIMESTAMP)))",
+ "spark2": "SELECT MONTHS_BETWEEN(TO_DATE(CAST('1997-02-28 10:30:00' AS TIMESTAMP)), TO_DATE(CAST('1996-10-30' AS TIMESTAMP)))",
+ },
+ )
+ self.validate_all(
+ "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30')",
+ write={
+ "duckdb": "SELECT DATEDIFF('month', CAST('1996-10-30' AS TIMESTAMP), CAST('1997-02-28 10:30:00' AS TIMESTAMP))",
+ "hive": "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30')",
+ "spark": "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30')",
+ },
+ )
+ self.validate_all(
+ "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30', FALSE)",
+ write={
+ "duckdb": "SELECT DATEDIFF('month', CAST('1996-10-30' AS TIMESTAMP), CAST('1997-02-28 10:30:00' AS TIMESTAMP))",
+ "hive": "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30')",
+ "spark": "SELECT MONTHS_BETWEEN('1997-02-28 10:30:00', '1996-10-30', FALSE)",
+ },
+ )
+ self.validate_all(
+ "SELECT TO_TIMESTAMP('2016-12-31 00:12:00')",
+ write={
+ "": "SELECT CAST('2016-12-31 00:12:00' AS TIMESTAMP)",
+ "duckdb": "SELECT CAST('2016-12-31 00:12:00' AS TIMESTAMP)",
+ "spark": "SELECT CAST('2016-12-31 00:12:00' AS TIMESTAMP)",
+ },
+ )
+ self.validate_all(
+ "SELECT TO_TIMESTAMP('2016-12-31', 'yyyy-MM-dd')",
+ read={
+ "duckdb": "SELECT STRPTIME('2016-12-31', '%Y-%m-%d')",
+ },
+ write={
+ "": "SELECT STR_TO_TIME('2016-12-31', '%Y-%m-%d')",
+ "duckdb": "SELECT STRPTIME('2016-12-31', '%Y-%m-%d')",
+ "spark": "SELECT TO_TIMESTAMP('2016-12-31', 'yyyy-MM-dd')",
+ },
+ )
+ self.validate_all(
+ "SELECT RLIKE('John Doe', 'John.*')",
+ write={
+ "bigquery": "SELECT REGEXP_CONTAINS('John Doe', 'John.*')",
+ "hive": "SELECT 'John Doe' RLIKE 'John.*'",
+ "postgres": "SELECT 'John Doe' ~ 'John.*'",
+ "snowflake": "SELECT REGEXP_LIKE('John Doe', 'John.*')",
+ "spark": "SELECT 'John Doe' RLIKE 'John.*'",
+ },
+ )
+ self.validate_all(
"UNHEX(MD5(x))",
write={
"bigquery": "FROM_HEX(TO_HEX(MD5(x)))",
@@ -446,3 +504,21 @@ TBLPROPERTIES (
"CURRENT_USER()",
write={"spark": "CURRENT_USER()"},
)
+
+ def test_transform_query(self):
+ self.validate_identity("SELECT TRANSFORM(x) USING 'x' AS (x INT) FROM t")
+ self.validate_identity(
+ "SELECT TRANSFORM(zip_code, name, age) USING 'cat' AS (a, b, c) FROM person WHERE zip_code > 94511"
+ )
+ self.validate_identity(
+ "SELECT TRANSFORM(zip_code, name, age) USING 'cat' AS (a STRING, b STRING, c STRING) FROM person WHERE zip_code > 94511"
+ )
+ self.validate_identity(
+ "SELECT TRANSFORM(name, age) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' USING 'cat' AS (name_age STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '@' LINES TERMINATED BY '\n' NULL DEFINED AS 'NULL' FROM person"
+ )
+ self.validate_identity(
+ "SELECT TRANSFORM(zip_code, name, age) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ('field.delim'='\t') USING 'cat' AS (a STRING, b STRING, c STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ('field.delim'='\t') FROM person WHERE zip_code > 94511"
+ )
+ self.validate_identity(
+ "SELECT TRANSFORM(zip_code, name, age) USING 'cat' FROM person WHERE zip_code > 94500"
+ )
diff --git a/tests/dialects/test_teradata.py b/tests/dialects/test_teradata.py
index 6906e47..0df6d0b 100644
--- a/tests/dialects/test_teradata.py
+++ b/tests/dialects/test_teradata.py
@@ -61,6 +61,9 @@ class TestTeradata(Validator):
self.validate_identity(
"CREATE VOLATILE MULTISET TABLE a, NOT LOCAL AFTER JOURNAL, FREESPACE=1 PERCENT, DATABLOCKSIZE=10 BYTES, WITH NO CONCURRENT ISOLATED LOADING FOR ALL (a INT)"
)
+ self.validate_identity(
+ "CREATE VOLATILE SET TABLE example1 AS (SELECT col1, col2, col3 FROM table1) WITH DATA PRIMARY INDEX (col1) ON COMMIT PRESERVE ROWS"
+ )
self.validate_all(
"""
diff --git a/tests/dialects/test_tsql.py b/tests/dialects/test_tsql.py
index 065cdd0..f0a590f 100644
--- a/tests/dialects/test_tsql.py
+++ b/tests/dialects/test_tsql.py
@@ -33,15 +33,15 @@ class TestTSQL(Validator):
)
self.validate_all(
- "SELECT DATEPART(year, TRY_CAST('2017-01-01' AS DATE))",
+ "SELECT DATEPART(year, CAST('2017-01-01' AS DATE))",
read={"postgres": "SELECT DATE_PART('year', '2017-01-01'::DATE)"},
)
self.validate_all(
- "SELECT DATEPART(month, TRY_CAST('2017-03-01' AS DATE))",
+ "SELECT DATEPART(month, CAST('2017-03-01' AS DATE))",
read={"postgres": "SELECT DATE_PART('month', '2017-03-01'::DATE)"},
)
self.validate_all(
- "SELECT DATEPART(day, TRY_CAST('2017-01-02' AS DATE))",
+ "SELECT DATEPART(day, CAST('2017-01-02' AS DATE))",
read={"postgres": "SELECT DATE_PART('day', '2017-01-02'::DATE)"},
)
self.validate_all(
@@ -54,7 +54,7 @@ class TestTSQL(Validator):
self.validate_all(
"CONVERT(INT, CONVERT(NUMERIC, '444.75'))",
write={
- "mysql": "CAST(CAST('444.75' AS DECIMAL) AS INT)",
+ "mysql": "CAST(CAST('444.75' AS DECIMAL) AS SIGNED)",
"tsql": "CAST(CAST('444.75' AS NUMERIC) AS INTEGER)",
},
)
@@ -389,8 +389,59 @@ class TestTSQL(Validator):
},
)
+ def test_ddl(self):
+ self.validate_all(
+ "CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
+ write={
+ "tsql": "CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIMESTAMP(4), d FLOAT(24))"
+ },
+ )
+
+ def test_transaction(self):
+ # BEGIN { TRAN | TRANSACTION }
+ # [ { transaction_name | @tran_name_variable }
+ # [ WITH MARK [ 'description' ] ]
+ # ]
+ # [ ; ]
+ self.validate_identity("BEGIN TRANSACTION")
+ self.validate_all("BEGIN TRAN", write={"tsql": "BEGIN TRANSACTION"})
+ self.validate_identity("BEGIN TRANSACTION transaction_name")
+ self.validate_identity("BEGIN TRANSACTION @tran_name_variable")
+ self.validate_identity("BEGIN TRANSACTION transaction_name WITH MARK 'description'")
+
+ def test_commit(self):
+ # COMMIT [ { TRAN | TRANSACTION } [ transaction_name | @tran_name_variable ] ] [ WITH ( DELAYED_DURABILITY = { OFF | ON } ) ] [ ; ]
+
+ self.validate_all("COMMIT", write={"tsql": "COMMIT TRANSACTION"})
+ self.validate_all("COMMIT TRAN", write={"tsql": "COMMIT TRANSACTION"})
+ self.validate_identity("COMMIT TRANSACTION")
+ self.validate_identity("COMMIT TRANSACTION transaction_name")
+ self.validate_identity("COMMIT TRANSACTION @tran_name_variable")
+
+ self.validate_identity(
+ "COMMIT TRANSACTION @tran_name_variable WITH (DELAYED_DURABILITY = ON)"
+ )
+ self.validate_identity(
+ "COMMIT TRANSACTION transaction_name WITH (DELAYED_DURABILITY = OFF)"
+ )
+
+ def test_rollback(self):
+ # Applies to SQL Server and Azure SQL Database
+ # ROLLBACK { TRAN | TRANSACTION }
+ # [ transaction_name | @tran_name_variable
+ # | savepoint_name | @savepoint_variable ]
+ # [ ; ]
+ self.validate_all("ROLLBACK", write={"tsql": "ROLLBACK TRANSACTION"})
+ self.validate_all("ROLLBACK TRAN", write={"tsql": "ROLLBACK TRANSACTION"})
+ self.validate_identity("ROLLBACK TRANSACTION")
+ self.validate_identity("ROLLBACK TRANSACTION transaction_name")
+ self.validate_identity("ROLLBACK TRANSACTION @tran_name_variable")
+
def test_udf(self):
self.validate_identity(
+ "DECLARE @DWH_DateCreated DATETIME = CONVERT(DATETIME, getdate(), 104)"
+ )
+ self.validate_identity(
"CREATE PROCEDURE foo @a INTEGER, @b INTEGER AS SELECT @a = SUM(bla) FROM baz AS bar"
)
self.validate_identity(
@@ -446,6 +497,12 @@ WHERE
pretty=True,
)
+ def test_procedure_keywords(self):
+ self.validate_identity("BEGIN")
+ self.validate_identity("END")
+ self.validate_identity("SET XACT_ABORT ON")
+
+ def test_fullproc(self):
sql = """
CREATE procedure [TRANSF].[SP_Merge_Sales_Real]
@Loadid INTEGER
@@ -696,14 +753,14 @@ WHERE
self.validate_all(
"SELECT CONVERT(VARCHAR(10), testdb.dbo.test.x, 120) y FROM testdb.dbo.test",
write={
- "mysql": "SELECT CAST(DATE_FORMAT(testdb.dbo.test.x, '%Y-%m-%d %T') AS VARCHAR(10)) AS y FROM testdb.dbo.test",
+ "mysql": "SELECT CAST(DATE_FORMAT(testdb.dbo.test.x, '%Y-%m-%d %T') AS CHAR(10)) AS y FROM testdb.dbo.test",
"spark": "SELECT CAST(DATE_FORMAT(testdb.dbo.test.x, 'yyyy-MM-dd HH:mm:ss') AS VARCHAR(10)) AS y FROM testdb.dbo.test",
},
)
self.validate_all(
"SELECT CONVERT(VARCHAR(10), y.x) z FROM testdb.dbo.test y",
write={
- "mysql": "SELECT CAST(y.x AS VARCHAR(10)) AS z FROM testdb.dbo.test AS y",
+ "mysql": "SELECT CAST(y.x AS CHAR(10)) AS z FROM testdb.dbo.test AS y",
"spark": "SELECT CAST(y.x AS VARCHAR(10)) AS z FROM testdb.dbo.test AS y",
},
)
@@ -838,7 +895,8 @@ WHERE
write={"spark": "SELECT FORMAT_NUMBER(1000000.01, '###,###.###')"},
)
self.validate_all(
- "SELECT FORMAT(1234567, 'f')", write={"spark": "SELECT FORMAT_NUMBER(1234567, 'f')"}
+ "SELECT FORMAT(1234567, 'f')",
+ write={"spark": "SELECT FORMAT_NUMBER(1234567, 'f')"},
)
self.validate_all(
"SELECT FORMAT('01-01-1991', 'dd.mm.yyyy')",
@@ -853,7 +911,8 @@ WHERE
write={"spark": "SELECT DATE_FORMAT(date_col, 'MMMM d')"},
)
self.validate_all(
- "SELECT FORMAT(num_col, 'c')", write={"spark": "SELECT FORMAT_NUMBER(num_col, 'c')"}
+ "SELECT FORMAT(num_col, 'c')",
+ write={"spark": "SELECT FORMAT_NUMBER(num_col, 'c')"},
)
def test_string(self):
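
A sketch (not part of the patch) of the transaction-statement normalization added above:

    from sqlglot import transpile

    # TRAN shorthands and bare COMMIT/ROLLBACK normalize to the full keyword
    print(transpile("BEGIN TRAN", read="tsql", write="tsql")[0])  # BEGIN TRANSACTION
    print(transpile("COMMIT", read="tsql", write="tsql")[0])      # COMMIT TRANSACTION
    print(transpile("ROLLBACK", read="tsql", write="tsql")[0])    # ROLLBACK TRANSACTION
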