author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-06-02 23:59:11 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-06-02 23:59:11 +0000
commit    caea5267cb8e1fea3702adbdf6f68fd37d13b3b7 (patch)
tree      f06f1da1ab3b6906beca1c3c7222d28ff00766ac /tests/dataframe/integration/test_dataframe.py
parent    Adding upstream version 12.2.0. (diff)
download  sqlglot-0c9fd0a27262a4b82d2347fe92db95748c7421d4.tar.xz
          sqlglot-0c9fd0a27262a4b82d2347fe92db95748c7421d4.zip

Adding upstream version 15.0.0. (upstream/15.0.0)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/dataframe/integration/test_dataframe.py')
-rw-r--r--  tests/dataframe/integration/test_dataframe.py  8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/tests/dataframe/integration/test_dataframe.py b/tests/dataframe/integration/test_dataframe.py
index d00464b..702c6ee 100644
--- a/tests/dataframe/integration/test_dataframe.py
+++ b/tests/dataframe/integration/test_dataframe.py
@@ -1155,8 +1155,9 @@ class TestDataframeFunc(DataFrameValidator):
         df, dfs = self.compare_spark_with_sqlglot(df_joined, dfs_joined)
         self.assertIn("ResolvedHint (strategy=broadcast)", self.get_explain_plan(df))
         self.assertIn("ResolvedHint (strategy=broadcast)", self.get_explain_plan(dfs))
-
-        # TODO: Add test to make sure with and without alias are the same once ids are deterministic
+        self.assertEqual(
+            "'UnresolvedHint BROADCAST, ['a2]", self.get_explain_plan(dfs).split("\n")[1]
+        )
 
     def test_broadcast_func(self):
         df_joined = self.df_spark_employee.join(
@@ -1188,6 +1189,9 @@ class TestDataframeFunc(DataFrameValidator):
         df, dfs = self.compare_spark_with_sqlglot(df_joined, dfs_joined)
         self.assertIn("ResolvedHint (strategy=broadcast)", self.get_explain_plan(df))
         self.assertIn("ResolvedHint (strategy=broadcast)", self.get_explain_plan(dfs))
+        self.assertEqual(
+            "'UnresolvedHint BROADCAST, ['a2]", self.get_explain_plan(dfs).split("\n")[1]
+        )
 
     def test_repartition_by_num(self):
         """