author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-17 09:15:16 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-17 09:15:16 +0000
commit     93346175ed97c685979fba99a6ae68268484d8c1 (patch)
tree       7674a4f4c8e9b128d79559002aaaea2ead346242 /sqlglot/parser.py
parent     Adding upstream version 25.0.3. (diff)
Adding upstream version 25.1.0. (upstream/25.1.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/parser.py')
-rw-r--r--  sqlglot/parser.py  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index ed53b19..cf5cc9d 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -588,11 +588,12 @@ class Parser(metaclass=_Parser):
     }

     JOIN_KINDS = {
+        TokenType.ANTI,
+        TokenType.CROSS,
         TokenType.INNER,
         TokenType.OUTER,
-        TokenType.CROSS,
         TokenType.SEMI,
-        TokenType.ANTI,
+        TokenType.STRAIGHT_JOIN,
     }

     JOIN_HINTS: t.Set[str] = set()
@@ -1065,7 +1066,7 @@
         exp.DataType.Type.JSON: lambda self, this, _: self.expression(exp.ParseJSON, this=this),
     }

-    TYPE_CONVERTER: t.Dict[exp.DataType.Type, t.Callable[[exp.DataType], exp.DataType]] = {}
+    TYPE_CONVERTERS: t.Dict[exp.DataType.Type, t.Callable[[exp.DataType], exp.DataType]] = {}

     DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN}
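
The dialect hook is renamed from TYPE_CONVERTER to TYPE_CONVERTERS. A minimal sketch of how a dialect parser might populate it, matching the declared signature; the subclass name and the DECIMAL default below are illustrative, not part of this diff:

# Illustrative sketch only: give unparameterized DECIMAL a default precision/scale
# via the renamed TYPE_CONVERTERS hook (same signature as declared above).
from sqlglot import exp, parser

def _default_decimal(dtype: exp.DataType) -> exp.DataType:
    # Leave explicitly parameterized types untouched.
    if dtype.expressions:
        return dtype
    return exp.DataType.build("DECIMAL(38, 9)")

class MyParser(parser.Parser):  # hypothetical subclass
    TYPE_CONVERTERS = {exp.DataType.Type.DECIMAL: _default_decimal}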
@@ -1138,7 +1139,14 @@

     FETCH_TOKENS = ID_VAR_TOKENS - {TokenType.ROW, TokenType.ROWS, TokenType.PERCENT}

-    ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}
+    ADD_CONSTRAINT_TOKENS = {
+        TokenType.CONSTRAINT,
+        TokenType.FOREIGN_KEY,
+        TokenType.INDEX,
+        TokenType.KEY,
+        TokenType.PRIMARY_KEY,
+        TokenType.UNIQUE,
+    }

     DISTINCT_TOKENS = {TokenType.DISTINCT}
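
With INDEX, KEY and UNIQUE now accepted after ADD, MySQL-style index additions in ALTER TABLE should be recognised. A hedged usage sketch (exact rendered output not verified here):

# Hedged sketch: MySQL ALTER TABLE ... ADD INDEX should now parse and round-trip.
import sqlglot

print(sqlglot.transpile("ALTER TABLE t ADD INDEX idx_name (col1)", read="mysql")[0])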
@@ -3099,7 +3107,7 @@
         index = self._index
         method, side, kind = self._parse_join_parts()
         hint = self._prev.text if self._match_texts(self.JOIN_HINTS) else None
-        join = self._match(TokenType.JOIN)
+        join = self._match(TokenType.JOIN) or (kind and kind.token_type == TokenType.STRAIGHT_JOIN)

         if not skip_join_token and not join:
             self._retreat(index)
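
Since STRAIGHT_JOIN is now a join kind, the bare MySQL keyword counts as a join even though no separate JOIN token follows it. A hedged sketch of the intended behaviour:

# Hedged sketch: MySQL STRAIGHT_JOIN parses as a join kind on its own.
import sqlglot

ast = sqlglot.parse_one("SELECT * FROM t1 STRAIGHT_JOIN t2 ON t1.id = t2.id", read="mysql")
print(ast.sql(dialect="mysql"))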
@@ -3242,7 +3250,7 @@
             while self._match_set(self.TABLE_INDEX_HINT_TOKENS):
                 hint = exp.IndexTableHint(this=self._prev.text.upper())

-                self._match_texts(("INDEX", "KEY"))
+                self._match_set((TokenType.INDEX, TokenType.KEY))
                 if self._match(TokenType.FOR):
                     hint.set("target", self._advance_any() and self._prev.text.upper())
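
The hint parser now matches the dedicated INDEX/KEY token types instead of raw text; MySQL table index hints should still land in exp.IndexTableHint. A hedged sketch, assuming the USE INDEX ... FOR JOIN form is supported by the MySQL dialect:

# Hedged sketch: MySQL index hints are expected to keep producing IndexTableHint nodes.
import sqlglot
from sqlglot import exp

ast = sqlglot.parse_one("SELECT * FROM t USE INDEX FOR JOIN (idx_a)", read="mysql")
print(ast.find(exp.IndexTableHint))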
@@ -4464,8 +4472,8 @@
             )
             self._match(TokenType.R_BRACKET)

-        if self.TYPE_CONVERTER and isinstance(this.this, exp.DataType.Type):
-            converter = self.TYPE_CONVERTER.get(this.this)
+        if self.TYPE_CONVERTERS and isinstance(this.this, exp.DataType.Type):
+            converter = self.TYPE_CONVERTERS.get(this.this)
             if converter:
                 this = converter(t.cast(exp.DataType, this))
@@ -4496,7 +4504,12 @@

     def _parse_column(self) -> t.Optional[exp.Expression]:
         this = self._parse_column_reference()
-        return self._parse_column_ops(this) if this else self._parse_bracket(this)
+        column = self._parse_column_ops(this) if this else self._parse_bracket(this)
+
+        if self.dialect.SUPPORTS_COLUMN_JOIN_MARKS and column:
+            column.set("join_mark", self._match(TokenType.JOIN_MARKER))
+
+        return column

     def _parse_column_reference(self) -> t.Optional[exp.Expression]:
         this = self._parse_field()
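
SUPPORTS_COLUMN_JOIN_MARKS is a dialect flag for Oracle-style (+) outer-join markers; when it is set, the marker is now recorded on the parsed column. A hedged sketch:

# Hedged sketch: the Oracle (+) join marker is captured on the column expression
# for dialects that set SUPPORTS_COLUMN_JOIN_MARKS.
import sqlglot

ast = sqlglot.parse_one("SELECT * FROM a, b WHERE a.id = b.id (+)", read="oracle")
print(ast.sql(dialect="oracle"))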
@@ -4522,7 +4535,11 @@

         while self._match(TokenType.COLON):
             start_index = self._index
-            path = self._parse_column_ops(self._parse_field(any_token=True))
+
+            # Snowflake allows reserved keywords as json keys but advance_any() excludes TokenType.SELECT from any_tokens=True
+            path = self._parse_column_ops(
+                self._parse_field(any_token=True, tokens=(TokenType.SELECT,))
+            )

             # The cast :: operator has a lower precedence than the extraction operator :, so
             # we rearrange the AST appropriately to avoid casting the JSON path
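
The extra tokens=(TokenType.SELECT,) lets a reserved keyword serve as a JSON key after Snowflake's : operator. A hedged sketch of the case this targets:

# Hedged sketch: reserved keywords such as SELECT can appear as JSON path keys after ':'.
import sqlglot

ast = sqlglot.parse_one("SELECT payload:select FROM events", read="snowflake")
print(ast.sql(dialect="snowflake"))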