summaryrefslogtreecommitdiffstats
path: root/sqlglot/dialects/oracle.py
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2023-09-13 09:17:40 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2023-09-13 09:17:40 +0000
commitbdf5cc7bdd5ec93dc928d81e286f7b1e678ba19d (patch)
tree4d46f9407b792f6fd5d767d510e6865ec9640569 /sqlglot/dialects/oracle.py
parentReleasing progress-linux version 18.3.0-1. (diff)
downloadsqlglot-bdf5cc7bdd5ec93dc928d81e286f7b1e678ba19d.tar.xz
sqlglot-bdf5cc7bdd5ec93dc928d81e286f7b1e678ba19d.zip
Merging upstream version 18.4.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/dialects/oracle.py')
-rw-r--r--sqlglot/dialects/oracle.py45
1 file changed, 45 insertions, 0 deletions
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index 279ed31..378df49 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -7,6 +7,9 @@ from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sq
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
+if t.TYPE_CHECKING:
+ from sqlglot._typing import E
+
def _parse_xml_table(self: Oracle.Parser) -> exp.XMLTable:
this = self._parse_string()
@@ -69,6 +72,16 @@ class Oracle(Dialect):
FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
**parser.Parser.FUNCTION_PARSERS,
+ "JSON_ARRAY": lambda self: self._parse_json_array(
+ exp.JSONArray,
+ expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
+ ),
+ "JSON_ARRAYAGG": lambda self: self._parse_json_array(
+ exp.JSONArrayAgg,
+ this=self._parse_format_json(self._parse_bitwise()),
+ order=self._parse_order(),
+ ),
+ "JSON_TABLE": lambda self: self._parse_json_table(),
"XMLTABLE": _parse_xml_table,
}
@@ -82,6 +95,38 @@ class Oracle(Dialect):
# Reference: https://stackoverflow.com/a/336455
DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
+ # Note: this is currently incomplete; it only implements the "JSON_value_column" part
+ def _parse_json_column_def(self) -> exp.JSONColumnDef:
+ this = self._parse_id_var()
+ kind = self._parse_types(allow_identifiers=False)
+ path = self._match_text_seq("PATH") and self._parse_string()
+ return self.expression(exp.JSONColumnDef, this=this, kind=kind, path=path)
+
+ def _parse_json_table(self) -> exp.JSONTable:
+ this = self._parse_format_json(self._parse_bitwise())
+ path = self._match(TokenType.COMMA) and self._parse_string()
+ error_handling = self._parse_on_handling("ERROR", "ERROR", "NULL")
+ empty_handling = self._parse_on_handling("EMPTY", "ERROR", "NULL")
+ self._match(TokenType.COLUMN)
+ expressions = self._parse_wrapped_csv(self._parse_json_column_def, optional=True)
+
+ return exp.JSONTable(
+ this=this,
+ expressions=expressions,
+ path=path,
+ error_handling=error_handling,
+ empty_handling=empty_handling,
+ )
+
+ def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
+ return self.expression(
+ expr_type,
+ null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
+ return_type=self._match_text_seq("RETURNING") and self._parse_type(),
+ strict=self._match_text_seq("STRICT"),
+ **kwargs,
+ )
+
def _parse_column(self) -> t.Optional[exp.Expression]:
column = super()._parse_column()
if column: