path: root/sqlglot/tokens.py
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2023-06-22 18:53:34 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2023-06-22 18:53:34 +0000
commit     8f88a01462641cbf930b3c43b780565d0fb7d37e (patch)
tree       e211588c29e6ce6d16fbbfd33d8cda63237c2e6e /sqlglot/tokens.py
parent     Releasing debian version 16.2.1-1. (diff)
download   sqlglot-8f88a01462641cbf930b3c43b780565d0fb7d37e.tar.xz
           sqlglot-8f88a01462641cbf930b3c43b780565d0fb7d37e.zip
Merging upstream version 16.4.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'sqlglot/tokens.py')
-rw-r--r--  sqlglot/tokens.py | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/sqlglot/tokens.py b/sqlglot/tokens.py
index 42628b9..79f7a65 100644
--- a/sqlglot/tokens.py
+++ b/sqlglot/tokens.py
@@ -4,7 +4,7 @@ import typing as t
from enum import auto
from sqlglot.helper import AutoName
-from sqlglot.trie import in_trie, new_trie
+from sqlglot.trie import TrieResult, in_trie, new_trie
class TokenType(AutoName):
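
Note: the rest of this diff replaces in_trie's bare integer return codes (0/1/2) with a named TrieResult enum. A minimal sketch of what sqlglot.trie presumably defines in 16.4.0, inferred from the hunks below rather than from the module source itself:

    from enum import Enum, auto

    class TrieResult(Enum):
        FAILED = auto()  # key matches nothing in the trie (old 0)
        PREFIX = auto()  # key is a proper prefix of a stored key (old 1)
        EXISTS = auto()  # key itself is stored in the trie (old 2)
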
@@ -137,6 +137,7 @@ class TokenType(AutoName):
BIGSERIAL = auto()
XML = auto()
UNIQUEIDENTIFIER = auto()
+ USERDEFINED = auto()
MONEY = auto()
SMALLMONEY = auto()
ROWVERSION = auto()
@@ -163,6 +164,7 @@ class TokenType(AutoName):
CACHE = auto()
CASE = auto()
CHARACTER_SET = auto()
+ CLUSTER_BY = auto()
COLLATE = auto()
COMMAND = auto()
COMMENT = auto()
@@ -182,6 +184,7 @@ class TokenType(AutoName):
DESCRIBE = auto()
DICTIONARY = auto()
DISTINCT = auto()
+ DISTRIBUTE_BY = auto()
DIV = auto()
DROP = auto()
ELSE = auto()
@@ -196,6 +199,7 @@ class TokenType(AutoName):
FINAL = auto()
FIRST = auto()
FOR = auto()
+ FORCE = auto()
FOREIGN_KEY = auto()
FORMAT = auto()
FROM = auto()
@@ -208,6 +212,7 @@ class TokenType(AutoName):
HAVING = auto()
HINT = auto()
IF = auto()
+ IGNORE = auto()
ILIKE = auto()
ILIKE_ANY = auto()
IN = auto()
@@ -282,6 +287,7 @@ class TokenType(AutoName):
SHOW = auto()
SIMILAR_TO = auto()
SOME = auto()
+ SORT_BY = auto()
STRUCT = auto()
TABLE_SAMPLE = auto()
TEMPORARY = auto()
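
Note: the hunks above only add new enum members. TokenType derives from sqlglot.helper.AutoName, an Enum helper whose auto() values are the member names themselves; a self-contained sketch of that pattern (mirroring, not quoting, sqlglot.helper):

    from enum import Enum, auto

    class AutoName(Enum):
        # auto() yields the member's own name instead of an integer
        def _generate_next_value_(name, start, count, last_values):
            return name

    class TokenType(AutoName):
        CLUSTER_BY = auto()
        SORT_BY = auto()

    assert TokenType.SORT_BY.value == "SORT_BY"
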
@@ -509,6 +515,7 @@ class Tokenizer(metaclass=_Tokenizer):
"UNCACHE": TokenType.UNCACHE,
"CASE": TokenType.CASE,
"CHARACTER SET": TokenType.CHARACTER_SET,
+ "CLUSTER BY": TokenType.CLUSTER_BY,
"COLLATE": TokenType.COLLATE,
"COLUMN": TokenType.COLUMN,
"COMMIT": TokenType.COMMIT,
@@ -526,6 +533,7 @@ class Tokenizer(metaclass=_Tokenizer):
"DESC": TokenType.DESC,
"DESCRIBE": TokenType.DESCRIBE,
"DISTINCT": TokenType.DISTINCT,
+ "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
"DIV": TokenType.DIV,
"DROP": TokenType.DROP,
"ELSE": TokenType.ELSE,
@@ -617,6 +625,7 @@ class Tokenizer(metaclass=_Tokenizer):
"SHOW": TokenType.SHOW,
"SIMILAR TO": TokenType.SIMILAR_TO,
"SOME": TokenType.SOME,
+ "SORT BY": TokenType.SORT_BY,
"TABLE": TokenType.TABLE,
"TABLESAMPLE": TokenType.TABLE_SAMPLE,
"TEMP": TokenType.TEMPORARY,
@@ -717,6 +726,7 @@ class Tokenizer(metaclass=_Tokenizer):
"PREPARE": TokenType.COMMAND,
"TRUNCATE": TokenType.COMMAND,
"VACUUM": TokenType.COMMAND,
+ "USER-DEFINED": TokenType.USERDEFINED,
}
WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
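
Note: with the KEYWORDS additions above, multi-word keywords such as "DISTRIBUTE BY" and "SORT BY" now tokenize as single tokens. A short usage sketch against the public Tokenizer API (token_type is the standard field on sqlglot's Token class):

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT x FROM t DISTRIBUTE BY x SORT BY x")
    kinds = [token.token_type for token in tokens]

    assert TokenType.DISTRIBUTE_BY in kinds  # one token, not DISTRIBUTE then BY
    assert TokenType.SORT_BY in kinds
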
@@ -905,13 +915,13 @@ class Tokenizer(metaclass=_Tokenizer):
while chars:
if skip:
- result = 1
+ result = TrieResult.PREFIX
else:
result, trie = in_trie(trie, char.upper())
- if result == 0:
+ if result == TrieResult.FAILED:
break
- if result == 2:
+ if result == TrieResult.EXISTS:
word = chars
size += 1
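
Note: a sketch of the TrieResult semantics the rewritten loop above relies on, assuming new_trie accepts an iterable of string keys as it does elsewhere in sqlglot:

    from sqlglot.trie import TrieResult, in_trie, new_trie

    trie = new_trie(["CAT", "CATS"])

    assert in_trie(trie, "CA")[0] == TrieResult.PREFIX   # prefix of a stored key
    assert in_trie(trie, "CAT")[0] == TrieResult.EXISTS  # exact match
    assert in_trie(trie, "DOG")[0] == TrieResult.FAILED  # no stored key starts with "D"
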