diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-01-30 17:08:37 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2023-01-30 17:08:37 +0000 |
commit | be1cb18ea28222fca384a5459a024b7e9af5cadb (patch) | |
tree | 4698c9069380a7c30ceb51129f93f6c8662315e4 /sqlglot/tokens.py | |
parent | Releasing debian version 10.5.6-1. (diff) | |
download | sqlglot-be1cb18ea28222fca384a5459a024b7e9af5cadb.tar.xz sqlglot-be1cb18ea28222fca384a5459a024b7e9af5cadb.zip |
Merging upstream version 10.5.10.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/tokens.py')
-rw-r--r-- | sqlglot/tokens.py | 9 |
1 file changed, 8 insertions, 1 deletion
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py index f12528f..19dd1d6 100644 --- a/sqlglot/tokens.py +++ b/sqlglot/tokens.py @@ -84,6 +84,8 @@ class TokenType(AutoName): TEXT = auto() MEDIUMTEXT = auto() LONGTEXT = auto() + MEDIUMBLOB = auto() + LONGBLOB = auto() BINARY = auto() VARBINARY = auto() JSON = auto() @@ -587,6 +589,7 @@ class Tokenizer(metaclass=_Tokenizer): "PRECEDING": TokenType.PRECEDING, "PRIMARY KEY": TokenType.PRIMARY_KEY, "PROCEDURE": TokenType.PROCEDURE, + "QUALIFY": TokenType.QUALIFY, "RANGE": TokenType.RANGE, "RECURSIVE": TokenType.RECURSIVE, "REGEXP": TokenType.RLIKE, @@ -726,6 +729,8 @@ class Tokenizer(metaclass=_Tokenizer): TokenType.SHOW, } + COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} + # handle numeric literals like in hive (3L = BIGINT) NUMERIC_LITERALS: t.Dict[str, str] = {} ENCODE: t.Optional[str] = None @@ -842,8 +847,10 @@ class Tokenizer(metaclass=_Tokenizer): ) self._comments = [] + # If we have either a semicolon or a begin token before the command's token, we'll parse + # whatever follows the command's token as a string if token_type in self.COMMANDS and ( - len(self.tokens) == 1 or self.tokens[-2].token_type == TokenType.SEMICOLON + len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS ): start = self._current tokens = len(self.tokens) |