diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-09-20 09:22:22 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-09-20 09:22:22 +0000 |
commit | 281115ae80458761fb3593c81fe9488976b87c6d (patch) | |
tree | f159075e865f3bad227b365878dc315eeab64112 /sqlglot/parser.py | |
parent | Adding upstream version 18.4.1. (diff) | |
download | sqlglot-281115ae80458761fb3593c81fe9488976b87c6d.tar.xz sqlglot-281115ae80458761fb3593c81fe9488976b87c6d.zip |
Adding upstream version 18.5.1. (tag: upstream/18.5.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/parser.py')
-rw-r--r-- | sqlglot/parser.py | 28 |
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py index f721582..06bc1eb 100644 --- a/sqlglot/parser.py +++ b/sqlglot/parser.py @@ -820,7 +820,9 @@ class Parser(metaclass=_Parser): SHOW_PARSERS: t.Dict[str, t.Callable] = {} - TYPE_LITERAL_PARSERS: t.Dict[exp.DataType.Type, t.Callable] = {} + TYPE_LITERAL_PARSERS = { + exp.DataType.Type.JSON: lambda self, this, _: self.expression(exp.ParseJSON, this=this), + } MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table) @@ -848,6 +850,8 @@ class Parser(metaclass=_Parser): WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER} WINDOW_SIDES = {"FOLLOWING", "PRECEDING"} + FETCH_TOKENS = ID_VAR_TOKENS - {TokenType.ROW, TokenType.ROWS, TokenType.PERCENT} + ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY} DISTINCT_TOKENS = {TokenType.DISTINCT} @@ -863,8 +867,6 @@ class Parser(metaclass=_Parser): LOG_BASE_FIRST = True LOG_DEFAULTS_TO_LN = False - SUPPORTS_USER_DEFINED_TYPES = True - # Whether or not ADD is present for each column added by ALTER TABLE ALTER_TABLE_ADD_COLUMN_KEYWORD = True @@ -892,6 +894,7 @@ class Parser(metaclass=_Parser): UNNEST_COLUMN_ONLY: bool = False ALIAS_POST_TABLESAMPLE: bool = False STRICT_STRING_CONCAT = False + SUPPORTS_USER_DEFINED_TYPES = True NORMALIZE_FUNCTIONS = "upper" NULL_ORDERING: str = "nulls_are_small" SHOW_TRIE: t.Dict = {} @@ -2692,7 +2695,7 @@ class Parser(metaclass=_Parser): expressions = self._parse_csv(self._parse_primary) else: expressions = None - num = self._parse_number() + num = self._parse_primary() if self._match_text_seq("BUCKET"): bucket_numerator = self._parse_number() @@ -2914,6 +2917,10 @@ class Parser(metaclass=_Parser): ) connect = self._parse_conjunction() self.NO_PAREN_FUNCTION_PARSERS.pop("PRIOR") + + if not start and self._match(TokenType.START_WITH): + start = self._parse_conjunction() + return self.expression(exp.Connect, start=start, connect=connect) def _parse_order( @@ -2985,7 +2992,7 @@ class Parser(metaclass=_Parser): 
direction = self._match_set((TokenType.FIRST, TokenType.NEXT)) direction = self._prev.text if direction else "FIRST" - count = self._parse_number() + count = self._parse_field(tokens=self.FETCH_TOKENS) percent = self._match(TokenType.PERCENT) self._match_set((TokenType.ROW, TokenType.ROWS)) @@ -3272,7 +3279,12 @@ class Parser(metaclass=_Parser): if tokens[0].token_type in self.TYPE_TOKENS: self._prev = tokens[0] elif self.SUPPORTS_USER_DEFINED_TYPES: - return exp.DataType.build(identifier.name, udt=True) + type_name = identifier.name + + while self._match(TokenType.DOT): + type_name = f"{type_name}.{self._advance_any() and self._prev.text}" + + return exp.DataType.build(type_name, udt=True) else: return None else: @@ -3816,7 +3828,9 @@ class Parser(metaclass=_Parser): def _parse_unique(self) -> exp.UniqueColumnConstraint: self._match_text_seq("KEY") return self.expression( - exp.UniqueColumnConstraint, this=self._parse_schema(self._parse_id_var(any_token=False)) + exp.UniqueColumnConstraint, + this=self._parse_schema(self._parse_id_var(any_token=False)), + index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text, ) def _parse_key_constraint_options(self) -> t.List[str]: |