diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-01-30 17:08:33 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-01-30 17:08:33 +0000 |
commit | 75d158890b303b701c51f12b34c422fb823ba9aa (patch) | |
tree | 5f10a4a1eb612918ea94a934cfc9b9893ea19442 /sqlglot/tokens.py | |
parent | Adding upstream version 10.5.6. (diff) | |
download | sqlglot-811e32ae0b388d902ed1abd5a2087f3014b09a6a.tar.xz sqlglot-811e32ae0b388d902ed1abd5a2087f3014b09a6a.zip |
Adding upstream version 10.5.10. (tag: upstream/10.5.10)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/tokens.py')
-rw-r--r-- | sqlglot/tokens.py | 9 |
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index f12528f..19dd1d6 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -84,6 +84,8 @@ class TokenType(AutoName):
     TEXT = auto()
     MEDIUMTEXT = auto()
     LONGTEXT = auto()
+    MEDIUMBLOB = auto()
+    LONGBLOB = auto()
     BINARY = auto()
     VARBINARY = auto()
     JSON = auto()
@@ -587,6 +589,7 @@ class Tokenizer(metaclass=_Tokenizer):
         "PRECEDING": TokenType.PRECEDING,
         "PRIMARY KEY": TokenType.PRIMARY_KEY,
         "PROCEDURE": TokenType.PROCEDURE,
+        "QUALIFY": TokenType.QUALIFY,
         "RANGE": TokenType.RANGE,
         "RECURSIVE": TokenType.RECURSIVE,
         "REGEXP": TokenType.RLIKE,
@@ -726,6 +729,8 @@ class Tokenizer(metaclass=_Tokenizer):
         TokenType.SHOW,
     }

+    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
+
     # handle numeric literals like in hive (3L = BIGINT)
     NUMERIC_LITERALS: t.Dict[str, str] = {}
     ENCODE: t.Optional[str] = None
@@ -842,8 +847,10 @@ class Tokenizer(metaclass=_Tokenizer):
         )
         self._comments = []

+        # If we have either a semicolon or a begin token before the command's token, we'll parse
+        # whatever follows the command's token as a string
         if token_type in self.COMMANDS and (
-            len(self.tokens) == 1 or self.tokens[-2].token_type == TokenType.SEMICOLON
+            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
         ):
             start = self._current
             tokens = len(self.tokens)