author     Daniel Baumann <daniel.baumann@progress-linux.org>  2023-04-03 07:31:54 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2023-04-03 07:31:54 +0000
commit     b38d717d5933fdae3fe85c87df7aee9a251fb58e (patch)
tree       6db21a44ffea4c832dcab29688bfaf1c1dc124f9 /tests/dialects/test_dialect.py
parent     Releasing debian version 11.4.1-1. (diff)
download   sqlglot-b38d717d5933fdae3fe85c87df7aee9a251fb58e.tar.xz
           sqlglot-b38d717d5933fdae3fe85c87df7aee9a251fb58e.zip
Merging upstream version 11.4.5.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--  tests/dialects/test_dialect.py  58
1 file changed, 58 insertions, 0 deletions
diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 6214c43..0805e9c 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -812,11 +812,13 @@ class TestDialect(Validator):
self.validate_all(
"JSON_EXTRACT(x, 'y')",
read={
+ "mysql": "JSON_EXTRACT(x, 'y')",
"postgres": "x->'y'",
"presto": "JSON_EXTRACT(x, 'y')",
"starrocks": "x -> 'y'",
},
write={
+ "mysql": "JSON_EXTRACT(x, 'y')",
"oracle": "JSON_EXTRACT(x, 'y')",
"postgres": "x -> 'y'",
"presto": "JSON_EXTRACT(x, 'y')",
@@ -835,6 +837,17 @@ class TestDialect(Validator):
},
)
self.validate_all(
+ "JSON_EXTRACT_SCALAR(stream_data, '$.data.results')",
+ read={
+ "hive": "GET_JSON_OBJECT(stream_data, '$.data.results')",
+ "mysql": "stream_data ->> '$.data.results'",
+ },
+ write={
+ "hive": "GET_JSON_OBJECT(stream_data, '$.data.results')",
+ "mysql": "stream_data ->> '$.data.results'",
+ },
+ )
+ self.validate_all(
"JSONB_EXTRACT(x, 'y')",
read={
"postgres": "x#>'y'",
@@ -1000,6 +1013,7 @@ class TestDialect(Validator):
self.validate_identity("some.column LIKE 'foo' || another.column || 'bar' || LOWER(x)")
self.validate_identity("some.column LIKE 'foo' + another.column + 'bar'")
+ self.validate_all("LIKE(x, 'z')", write={"": "'z' LIKE x"})
self.validate_all(
"x ILIKE '%y'",
read={
@@ -1196,9 +1210,13 @@ class TestDialect(Validator):
)
self.validate_all(
"SELECT x FROM y LIMIT 10",
+ read={
+ "tsql": "SELECT TOP 10 x FROM y",
+ },
write={
"sqlite": "SELECT x FROM y LIMIT 10",
"oracle": "SELECT x FROM y FETCH FIRST 10 ROWS ONLY",
+ "tsql": "SELECT x FROM y FETCH FIRST 10 ROWS ONLY",
},
)
self.validate_all(
@@ -1493,6 +1511,46 @@ SELECT
},
)
+ def test_logarithm(self):
+ self.validate_all(
+ "LOG(x)",
+ read={
+ "duckdb": "LOG(x)",
+ "postgres": "LOG(x)",
+ "redshift": "LOG(x)",
+ "sqlite": "LOG(x)",
+ "teradata": "LOG(x)",
+ },
+ )
+ self.validate_all(
+ "LN(x)",
+ read={
+ "bigquery": "LOG(x)",
+ "clickhouse": "LOG(x)",
+ "databricks": "LOG(x)",
+ "drill": "LOG(x)",
+ "hive": "LOG(x)",
+ "mysql": "LOG(x)",
+ "tsql": "LOG(x)",
+ },
+ )
+ self.validate_all(
+ "LOG(b, n)",
+ read={
+ "bigquery": "LOG(n, b)",
+ "databricks": "LOG(b, n)",
+ "drill": "LOG(b, n)",
+ "hive": "LOG(b, n)",
+ "mysql": "LOG(b, n)",
+ "oracle": "LOG(b, n)",
+ "postgres": "LOG(b, n)",
+ "snowflake": "LOG(b, n)",
+ "spark": "LOG(b, n)",
+ "sqlite": "LOG(b, n)",
+ "tsql": "LOG(n, b)",
+ },
+ )
+
def test_count_if(self):
self.validate_identity("COUNT_IF(DISTINCT cond)")