From 7b29f6168bf9fcb2d886447066a9bb51675e5665 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Tue, 4 Oct 2022 11:37:14 +0200
Subject: Merging upstream version 6.2.8.

Signed-off-by: Daniel Baumann
---
 sqlglot/tokens.py | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index c81f0db..39bf421 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -136,6 +136,7 @@ class TokenType(AutoName):
     DEFAULT = auto()
     DELETE = auto()
     DESC = auto()
+    DETERMINISTIC = auto()
     DISTINCT = auto()
     DISTRIBUTE_BY = auto()
     DROP = auto()
@@ -144,6 +145,7 @@ class TokenType(AutoName):
     ENGINE = auto()
     ESCAPE = auto()
     EXCEPT = auto()
+    EXECUTE = auto()
     EXISTS = auto()
     EXPLAIN = auto()
     FALSE = auto()
@@ -167,6 +169,7 @@ class TokenType(AutoName):
     IF = auto()
     IGNORE_NULLS = auto()
     ILIKE = auto()
+    IMMUTABLE = auto()
     IN = auto()
     INDEX = auto()
     INNER = auto()
@@ -215,6 +218,7 @@ class TokenType(AutoName):
     PLACEHOLDER = auto()
     PRECEDING = auto()
     PRIMARY_KEY = auto()
+    PROCEDURE = auto()
     PROPERTIES = auto()
     QUALIFY = auto()
     QUOTE = auto()
@@ -238,6 +242,7 @@ class TokenType(AutoName):
     SIMILAR_TO = auto()
     SOME = auto()
     SORT_BY = auto()
+    STABLE = auto()
     STORED = auto()
     STRUCT = auto()
     TABLE_FORMAT = auto()
@@ -258,6 +263,7 @@ class TokenType(AutoName):
     USING = auto()
     VALUES = auto()
     VIEW = auto()
+    VOLATILE = auto()
     WHEN = auto()
     WHERE = auto()
     WINDOW = auto()
@@ -430,6 +436,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "DEFAULT": TokenType.DEFAULT,
         "DELETE": TokenType.DELETE,
         "DESC": TokenType.DESC,
+        "DETERMINISTIC": TokenType.DETERMINISTIC,
         "DISTINCT": TokenType.DISTINCT,
         "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
         "DROP": TokenType.DROP,
@@ -438,6 +445,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "ENGINE": TokenType.ENGINE,
         "ESCAPE": TokenType.ESCAPE,
         "EXCEPT": TokenType.EXCEPT,
+        "EXECUTE": TokenType.EXECUTE,
         "EXISTS": TokenType.EXISTS,
         "EXPLAIN": TokenType.EXPLAIN,
         "FALSE": TokenType.FALSE,
@@ -456,6 +464,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "HAVING": TokenType.HAVING,
         "IF": TokenType.IF,
         "ILIKE": TokenType.ILIKE,
+        "IMMUTABLE": TokenType.IMMUTABLE,
         "IGNORE NULLS": TokenType.IGNORE_NULLS,
         "IN": TokenType.IN,
         "INDEX": TokenType.INDEX,
@@ -504,6 +513,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "PIVOT": TokenType.PIVOT,
         "PRECEDING": TokenType.PRECEDING,
         "PRIMARY KEY": TokenType.PRIMARY_KEY,
+        "PROCEDURE": TokenType.PROCEDURE,
         "RANGE": TokenType.RANGE,
         "RECURSIVE": TokenType.RECURSIVE,
         "REGEXP": TokenType.RLIKE,
@@ -522,6 +532,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "SHOW": TokenType.SHOW,
         "SOME": TokenType.SOME,
         "SORT BY": TokenType.SORT_BY,
+        "STABLE": TokenType.STABLE,
         "STORED": TokenType.STORED,
         "TABLE": TokenType.TABLE,
         "TABLE_FORMAT": TokenType.TABLE_FORMAT,
@@ -542,6 +553,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "USING": TokenType.USING,
         "VALUES": TokenType.VALUES,
         "VIEW": TokenType.VIEW,
+        "VOLATILE": TokenType.VOLATILE,
         "WHEN": TokenType.WHEN,
         "WHERE": TokenType.WHERE,
         "WITH": TokenType.WITH,
@@ -637,6 +649,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "_char",
         "_end",
         "_peek",
+        "_prev_token_type",
     )
 
     def __init__(self):
@@ -657,6 +670,7 @@ class Tokenizer(metaclass=_Tokenizer):
         self._char = None
         self._end = None
         self._peek = None
+        self._prev_token_type = None
 
     def tokenize(self, sql):
         self.reset()
@@ -706,8 +720,8 @@ class Tokenizer(metaclass=_Tokenizer):
         return self.sql[self._start : self._current]
 
     def _add(self, token_type, text=None):
-        text = self._text if text is None else text
-        self.tokens.append(Token(token_type, text, self._line, self._col))
+        self._prev_token_type = token_type
+        self.tokens.append(Token(token_type, self._text if text is None else text, self._line, self._col))
 
         if token_type in self.COMMANDS and (len(self.tokens) == 1 or self.tokens[-2].token_type == TokenType.SEMICOLON):
             self._start = self._current
@@ -910,7 +924,11 @@ class Tokenizer(metaclass=_Tokenizer):
                 self._advance()
             else:
                 break
-        self._add(self.KEYWORDS.get(self._text.upper(), TokenType.VAR))
+        self._add(
+            TokenType.VAR
+            if self._prev_token_type == TokenType.PARAMETER
+            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
+        )
 
     def _extract_string(self, delimiter):
         text = ""
-- 
cgit v1.2.3
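
The hunks above add six keywords (DETERMINISTIC, EXECUTE, IMMUTABLE,
PROCEDURE, STABLE, VOLATILE) to both the TokenType enum and the KEYWORDS
table, so they now tokenize as dedicated types instead of falling through
to VAR. A minimal sketch of the effect, assuming the public Tokenizer API
of sqlglot 6.2.8:

    from sqlglot.tokens import Tokenizer, TokenType

    # "PROCEDURE" now resolves through the KEYWORDS lookup in _scan_var
    # instead of defaulting to TokenType.VAR.
    tokens = Tokenizer().tokenize("CREATE PROCEDURE p")
    print(tokens[1].token_type)  # expected: TokenType.PROCEDURE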
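
The new _prev_token_type slot, recorded on every _add, feeds the changed
branch in _scan_var: a word that immediately follows a parameter token is
always emitted as VAR, even when its text collides with a keyword. A rough
illustration, assuming "@" maps to TokenType.PARAMETER in this version's
SINGLE_TOKENS (that mapping is not shown in this diff):

    from sqlglot.tokens import Tokenizer, TokenType

    # Before this patch, "table" after "@" would hit the KEYWORDS lookup
    # and come back as TokenType.TABLE; now _prev_token_type ==
    # TokenType.PARAMETER forces TokenType.VAR.
    tokens = Tokenizer().tokenize("SELECT @table")
    print([(t.token_type, t.text) for t in tokens[-2:]])
    # expected: [(TokenType.PARAMETER, '@'), (TokenType.VAR, 'table')]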