From b38d717d5933fdae3fe85c87df7aee9a251fb58e Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 3 Apr 2023 09:31:54 +0200
Subject: Merging upstream version 11.4.5.

Signed-off-by: Daniel Baumann
---
 tests/dialects/test_dialect.py | 58 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/tests/dialects/test_dialect.py b/tests/dialects/test_dialect.py
index 6214c43..0805e9c 100644
--- a/tests/dialects/test_dialect.py
+++ b/tests/dialects/test_dialect.py
@@ -812,11 +812,13 @@ class TestDialect(Validator):
         self.validate_all(
             "JSON_EXTRACT(x, 'y')",
             read={
+                "mysql": "JSON_EXTRACT(x, 'y')",
                 "postgres": "x->'y'",
                 "presto": "JSON_EXTRACT(x, 'y')",
                 "starrocks": "x -> 'y'",
             },
             write={
+                "mysql": "JSON_EXTRACT(x, 'y')",
                 "oracle": "JSON_EXTRACT(x, 'y')",
                 "postgres": "x -> 'y'",
                 "presto": "JSON_EXTRACT(x, 'y')",
@@ -834,6 +836,17 @@ class TestDialect(Validator):
                 "presto": "JSON_EXTRACT_SCALAR(x, 'y')",
             },
         )
+        self.validate_all(
+            "JSON_EXTRACT_SCALAR(stream_data, '$.data.results')",
+            read={
+                "hive": "GET_JSON_OBJECT(stream_data, '$.data.results')",
+                "mysql": "stream_data ->> '$.data.results'",
+            },
+            write={
+                "hive": "GET_JSON_OBJECT(stream_data, '$.data.results')",
+                "mysql": "stream_data ->> '$.data.results'",
+            },
+        )
         self.validate_all(
             "JSONB_EXTRACT(x, 'y')",
             read={
@@ -1000,6 +1013,7 @@ class TestDialect(Validator):
         self.validate_identity("some.column LIKE 'foo' || another.column || 'bar' || LOWER(x)")
         self.validate_identity("some.column LIKE 'foo' + another.column + 'bar'")
 
+        self.validate_all("LIKE(x, 'z')", write={"": "'z' LIKE x"})
         self.validate_all(
             "x ILIKE '%y'",
             read={
@@ -1196,9 +1210,13 @@ class TestDialect(Validator):
         )
         self.validate_all(
             "SELECT x FROM y LIMIT 10",
+            read={
+                "tsql": "SELECT TOP 10 x FROM y",
+            },
             write={
                 "sqlite": "SELECT x FROM y LIMIT 10",
                 "oracle": "SELECT x FROM y FETCH FIRST 10 ROWS ONLY",
+                "tsql": "SELECT x FROM y FETCH FIRST 10 ROWS ONLY",
             },
         )
         self.validate_all(
@@ -1493,6 +1511,46 @@ SELECT
             },
         )
 
+    def test_logarithm(self):
+        self.validate_all(
+            "LOG(x)",
+            read={
+                "duckdb": "LOG(x)",
+                "postgres": "LOG(x)",
+                "redshift": "LOG(x)",
+                "sqlite": "LOG(x)",
+                "teradata": "LOG(x)",
+            },
+        )
+        self.validate_all(
+            "LN(x)",
+            read={
+                "bigquery": "LOG(x)",
+                "clickhouse": "LOG(x)",
+                "databricks": "LOG(x)",
+                "drill": "LOG(x)",
+                "hive": "LOG(x)",
+                "mysql": "LOG(x)",
+                "tsql": "LOG(x)",
+            },
+        )
+        self.validate_all(
+            "LOG(b, n)",
+            read={
+                "bigquery": "LOG(n, b)",
+                "databricks": "LOG(b, n)",
+                "drill": "LOG(b, n)",
+                "hive": "LOG(b, n)",
+                "mysql": "LOG(b, n)",
+                "oracle": "LOG(b, n)",
+                "postgres": "LOG(b, n)",
+                "snowflake": "LOG(b, n)",
+                "spark": "LOG(b, n)",
+                "sqlite": "LOG(b, n)",
+                "tsql": "LOG(n, b)",
+            },
+        )
+
     def test_count_if(self):
         self.validate_identity("COUNT_IF(DISTINCT cond)")
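
The read/write pairs added above can also be reproduced through sqlglot's public transpile/parse_one API. Below is a minimal sketch of what the new cases assert, assuming sqlglot 11.4.5 is installed; note that validate_all is a helper on the repository's internal Validator test base class, not part of the public API, and the expected outputs shown in comments are taken from the test assertions above rather than verified output.

import sqlglot

# T-SQL's TOP 10 parses into a LIMIT and writes back out unchanged for SQLite,
# matching the new read/write entries on "SELECT x FROM y LIMIT 10".
print(sqlglot.transpile("SELECT TOP 10 x FROM y", read="tsql", write="sqlite"))
# expected per the test above: ['SELECT x FROM y LIMIT 10']

# MySQL's ->> operator maps through JSON_EXTRACT_SCALAR to Hive's
# GET_JSON_OBJECT, matching the new stream_data test case.
print(sqlglot.transpile("stream_data ->> '$.data.results'", read="mysql", write="hive"))
# expected per the test above: ["GET_JSON_OBJECT(stream_data, '$.data.results')"]

# The read-only mappings in test_logarithm parse in the source dialect and
# generate with the default dialect, e.g. BigQuery's reversed LOG argument order.
print(sqlglot.parse_one("LOG(n, b)", read="bigquery").sql())
# expected per the test above: LOG(b, n)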