sqlglot.tokens
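
The module exposes `Tokenizer`, which turns a SQL string into a flat list of `Token` objects labeled by the `TokenType` enum. A minimal usage sketch (the commented output is illustrative):

    from sqlglot.tokens import Tokenizer

    tokens = Tokenizer().tokenize("SELECT a FROM t GROUP BY a -- note")

    # Multi-word keywords are matched through the keyword trie, so
    # "GROUP BY" comes back as a single GROUP_BY token:
    print([tok.token_type for tok in tokens])
    # [SELECT, VAR, FROM, VAR, GROUP_BY, VAR] (as TokenType members)

    # A trailing comment on the same line is attached to the preceding token:
    print(tokens[-1].comments)  # [' note']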

   1from __future__ import annotations
   2
   3import typing as t
   4from enum import auto
   5
   6from sqlglot.errors import TokenError
   7from sqlglot.helper import AutoName
   8from sqlglot.trie import TrieResult, in_trie, new_trie
   9
  10
  11class TokenType(AutoName):
  12    L_PAREN = auto()
  13    R_PAREN = auto()
  14    L_BRACKET = auto()
  15    R_BRACKET = auto()
  16    L_BRACE = auto()
  17    R_BRACE = auto()
  18    COMMA = auto()
  19    DOT = auto()
  20    DASH = auto()
  21    PLUS = auto()
  22    COLON = auto()
  23    DCOLON = auto()
  24    DQMARK = auto()
  25    SEMICOLON = auto()
  26    STAR = auto()
  27    BACKSLASH = auto()
  28    SLASH = auto()
  29    LT = auto()
  30    LTE = auto()
  31    GT = auto()
  32    GTE = auto()
  33    NOT = auto()
  34    EQ = auto()
  35    NEQ = auto()
  36    NULLSAFE_EQ = auto()
  37    AND = auto()
  38    OR = auto()
  39    AMP = auto()
  40    DPIPE = auto()
  41    PIPE = auto()
  42    CARET = auto()
  43    TILDA = auto()
  44    ARROW = auto()
  45    DARROW = auto()
  46    FARROW = auto()
  47    HASH = auto()
  48    HASH_ARROW = auto()
  49    DHASH_ARROW = auto()
  50    LR_ARROW = auto()
  51    DAT = auto()
  52    LT_AT = auto()
  53    AT_GT = auto()
  54    DOLLAR = auto()
  55    PARAMETER = auto()
  56    SESSION_PARAMETER = auto()
  57    DAMP = auto()
  58    XOR = auto()
  59
  60    BLOCK_START = auto()
  61    BLOCK_END = auto()
  62
  63    SPACE = auto()
  64    BREAK = auto()
  65
  66    STRING = auto()
  67    NUMBER = auto()
  68    IDENTIFIER = auto()
  69    DATABASE = auto()
  70    COLUMN = auto()
  71    COLUMN_DEF = auto()
  72    SCHEMA = auto()
  73    TABLE = auto()
  74    VAR = auto()
  75    BIT_STRING = auto()
  76    HEX_STRING = auto()
  77    BYTE_STRING = auto()
  78    NATIONAL_STRING = auto()
  79    RAW_STRING = auto()
  80    HEREDOC_STRING = auto()
  81
  82    # types
  83    BIT = auto()
  84    BOOLEAN = auto()
  85    TINYINT = auto()
  86    UTINYINT = auto()
  87    SMALLINT = auto()
  88    USMALLINT = auto()
  89    MEDIUMINT = auto()
  90    UMEDIUMINT = auto()
  91    INT = auto()
  92    UINT = auto()
  93    BIGINT = auto()
  94    UBIGINT = auto()
  95    INT128 = auto()
  96    UINT128 = auto()
  97    INT256 = auto()
  98    UINT256 = auto()
  99    FLOAT = auto()
 100    DOUBLE = auto()
 101    DECIMAL = auto()
 102    UDECIMAL = auto()
 103    BIGDECIMAL = auto()
 104    CHAR = auto()
 105    NCHAR = auto()
 106    VARCHAR = auto()
 107    NVARCHAR = auto()
 108    TEXT = auto()
 109    MEDIUMTEXT = auto()
 110    LONGTEXT = auto()
 111    MEDIUMBLOB = auto()
 112    LONGBLOB = auto()
 113    TINYBLOB = auto()
 114    TINYTEXT = auto()
 115    BINARY = auto()
 116    VARBINARY = auto()
 117    JSON = auto()
 118    JSONB = auto()
 119    TIME = auto()
 120    TIMETZ = auto()
 121    TIMESTAMP = auto()
 122    TIMESTAMPTZ = auto()
 123    TIMESTAMPLTZ = auto()
 124    DATETIME = auto()
 125    DATETIME64 = auto()
 126    DATE = auto()
 127    INT4RANGE = auto()
 128    INT4MULTIRANGE = auto()
 129    INT8RANGE = auto()
 130    INT8MULTIRANGE = auto()
 131    NUMRANGE = auto()
 132    NUMMULTIRANGE = auto()
 133    TSRANGE = auto()
 134    TSMULTIRANGE = auto()
 135    TSTZRANGE = auto()
 136    TSTZMULTIRANGE = auto()
 137    DATERANGE = auto()
 138    DATEMULTIRANGE = auto()
 139    UUID = auto()
 140    GEOGRAPHY = auto()
 141    NULLABLE = auto()
 142    GEOMETRY = auto()
 143    HLLSKETCH = auto()
 144    HSTORE = auto()
 145    SUPER = auto()
 146    SERIAL = auto()
 147    SMALLSERIAL = auto()
 148    BIGSERIAL = auto()
 149    XML = auto()
 150    YEAR = auto()
 151    UNIQUEIDENTIFIER = auto()
 152    USERDEFINED = auto()
 153    MONEY = auto()
 154    SMALLMONEY = auto()
 155    ROWVERSION = auto()
 156    IMAGE = auto()
 157    VARIANT = auto()
 158    OBJECT = auto()
 159    INET = auto()
 160    IPADDRESS = auto()
 161    IPPREFIX = auto()
 162    ENUM = auto()
 163    ENUM8 = auto()
 164    ENUM16 = auto()
 165    FIXEDSTRING = auto()
 166    LOWCARDINALITY = auto()
 167    NESTED = auto()
 168    UNKNOWN = auto()
 169
 170    # keywords
 171    ALIAS = auto()
 172    ALTER = auto()
 173    ALWAYS = auto()
 174    ALL = auto()
 175    ANTI = auto()
 176    ANY = auto()
 177    APPLY = auto()
 178    ARRAY = auto()
 179    ASC = auto()
 180    ASOF = auto()
 181    AUTO_INCREMENT = auto()
 182    BEGIN = auto()
 183    BETWEEN = auto()
 184    CACHE = auto()
 185    CASE = auto()
 186    CHARACTER_SET = auto()
 187    CLUSTER_BY = auto()
 188    COLLATE = auto()
 189    COMMAND = auto()
 190    COMMENT = auto()
 191    COMMIT = auto()
 192    CONNECT_BY = auto()
 193    CONSTRAINT = auto()
 194    CREATE = auto()
 195    CROSS = auto()
 196    CUBE = auto()
 197    CURRENT_DATE = auto()
 198    CURRENT_DATETIME = auto()
 199    CURRENT_TIME = auto()
 200    CURRENT_TIMESTAMP = auto()
 201    CURRENT_USER = auto()
 202    DEFAULT = auto()
 203    DELETE = auto()
 204    DESC = auto()
 205    DESCRIBE = auto()
 206    DICTIONARY = auto()
 207    DISTINCT = auto()
 208    DISTRIBUTE_BY = auto()
 209    DIV = auto()
 210    DROP = auto()
 211    ELSE = auto()
 212    END = auto()
 213    ESCAPE = auto()
 214    EXCEPT = auto()
 215    EXECUTE = auto()
 216    EXISTS = auto()
 217    FALSE = auto()
 218    FETCH = auto()
 219    FILTER = auto()
 220    FINAL = auto()
 221    FIRST = auto()
 222    FOR = auto()
 223    FORCE = auto()
 224    FOREIGN_KEY = auto()
 225    FORMAT = auto()
 226    FROM = auto()
 227    FULL = auto()
 228    FUNCTION = auto()
 229    GLOB = auto()
 230    GLOBAL = auto()
 231    GROUP_BY = auto()
 232    GROUPING_SETS = auto()
 233    HAVING = auto()
 234    HINT = auto()
 235    IGNORE = auto()
 236    ILIKE = auto()
 237    ILIKE_ANY = auto()
 238    IN = auto()
 239    INDEX = auto()
 240    INNER = auto()
 241    INSERT = auto()
 242    INTERSECT = auto()
 243    INTERVAL = auto()
 244    INTO = auto()
 245    INTRODUCER = auto()
 246    IRLIKE = auto()
 247    IS = auto()
 248    ISNULL = auto()
 249    JOIN = auto()
 250    JOIN_MARKER = auto()
 251    KEEP = auto()
 252    KILL = auto()
 253    LANGUAGE = auto()
 254    LATERAL = auto()
 255    LEFT = auto()
 256    LIKE = auto()
 257    LIKE_ANY = auto()
 258    LIMIT = auto()
 259    LOAD = auto()
 260    LOCK = auto()
 261    MAP = auto()
 262    MATCH_RECOGNIZE = auto()
 263    MEMBER_OF = auto()
 264    MERGE = auto()
 265    MOD = auto()
 266    MODEL = auto()
 267    NATURAL = auto()
 268    NEXT = auto()
 269    NOTNULL = auto()
 270    NULL = auto()
 271    OBJECT_IDENTIFIER = auto()
 272    OFFSET = auto()
 273    ON = auto()
 274    ORDER_BY = auto()
 275    ORDERED = auto()
 276    ORDINALITY = auto()
 277    OUTER = auto()
 278    OVER = auto()
 279    OVERLAPS = auto()
 280    OVERWRITE = auto()
 281    PARTITION = auto()
 282    PARTITION_BY = auto()
 283    PERCENT = auto()
 284    PIVOT = auto()
 285    PLACEHOLDER = auto()
 286    PRAGMA = auto()
 287    PRIMARY_KEY = auto()
 288    PROCEDURE = auto()
 289    PROPERTIES = auto()
 290    PSEUDO_TYPE = auto()
 291    QUALIFY = auto()
 292    QUOTE = auto()
 293    RANGE = auto()
 294    RECURSIVE = auto()
 295    REPLACE = auto()
 296    RETURNING = auto()
 297    REFERENCES = auto()
 298    RIGHT = auto()
 299    RLIKE = auto()
 300    ROLLBACK = auto()
 301    ROLLUP = auto()
 302    ROW = auto()
 303    ROWS = auto()
 304    SELECT = auto()
 305    SEMI = auto()
 306    SEPARATOR = auto()
 307    SERDE_PROPERTIES = auto()
 308    SET = auto()
 309    SETTINGS = auto()
 310    SHOW = auto()
 311    SIMILAR_TO = auto()
 312    SOME = auto()
 313    SORT_BY = auto()
 314    START_WITH = auto()
 315    STRUCT = auto()
 316    TABLE_SAMPLE = auto()
 317    TEMPORARY = auto()
 318    TOP = auto()
 319    THEN = auto()
 320    TRUE = auto()
 321    UNCACHE = auto()
 322    UNION = auto()
 323    UNNEST = auto()
 324    UNPIVOT = auto()
 325    UPDATE = auto()
 326    USE = auto()
 327    USING = auto()
 328    VALUES = auto()
 329    VIEW = auto()
 330    VOLATILE = auto()
 331    WHEN = auto()
 332    WHERE = auto()
 333    WINDOW = auto()
 334    WITH = auto()
 335    UNIQUE = auto()
 336    VERSION_SNAPSHOT = auto()
 337    TIMESTAMP_SNAPSHOT = auto()
 338
 339
 340class Token:
 341    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
 342
 343    @classmethod
 344    def number(cls, number: int) -> Token:
 345        """Returns a NUMBER token with `number` as its text."""
 346        return cls(TokenType.NUMBER, str(number))
 347
 348    @classmethod
 349    def string(cls, string: str) -> Token:
 350        """Returns a STRING token with `string` as its text."""
 351        return cls(TokenType.STRING, string)
 352
 353    @classmethod
 354    def identifier(cls, identifier: str) -> Token:
 355        """Returns an IDENTIFIER token with `identifier` as its text."""
 356        return cls(TokenType.IDENTIFIER, identifier)
 357
 358    @classmethod
 359    def var(cls, var: str) -> Token:
 360        """Returns a VAR token with `var` as its text."""
 361        return cls(TokenType.VAR, var)
 362
 363    def __init__(
 364        self,
 365        token_type: TokenType,
 366        text: str,
 367        line: int = 1,
 368        col: int = 1,
 369        start: int = 0,
 370        end: int = 0,
 371        comments: t.Optional[t.List[str]] = None,
 372    ) -> None:
 373        """Token initializer.
 374
 375        Args:
 376            token_type: The TokenType Enum.
 377            text: The text of the token.
 378            line: The line that the token ends on.
 379            col: The column that the token ends on.
 380            start: The start index of the token.
 381            end: The ending index of the token.
 382            comments: The comments to attach to the token.
 383        """
 384        self.token_type = token_type
 385        self.text = text
 386        self.line = line
 387        self.col = col
 388        self.start = start
 389        self.end = end
 390        self.comments = comments if comments is not None else []
 391
 392    def __repr__(self) -> str:
 393        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 394        return f"<Token {attributes}>"
 395
 396
 397class _Tokenizer(type):
 398    def __new__(cls, clsname, bases, attrs):
 399        klass = super().__new__(cls, clsname, bases, attrs)
 400
 401        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 402            return dict(
 403                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
 404            )
 405
 406        def _quotes_to_format(
 407            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
 408        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
 409            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}
 410
 411        klass._QUOTES = _convert_quotes(klass.QUOTES)
 412        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)
 413
 414        klass._FORMAT_STRINGS = {
 415            **{
 416                p + s: (e, TokenType.NATIONAL_STRING)
 417                for s, e in klass._QUOTES.items()
 418                for p in ("n", "N")
 419            },
 420            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
 421            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
 422            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
 423            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
 424            **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
 425        }
 426
 427        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 428        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 429        klass._COMMENTS = {
 430            **dict(
 431                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 432                for comment in klass.COMMENTS
 433            ),
 434            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
 435        }
 436
 437        klass._KEYWORD_TRIE = new_trie(
 438            key.upper()
 439            for key in (
 440                *klass.KEYWORDS,
 441                *klass._COMMENTS,
 442                *klass._QUOTES,
 443                *klass._FORMAT_STRINGS,
 444            )
 445            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 446        )
 447
 448        return klass
 449
 450
 451class Tokenizer(metaclass=_Tokenizer):
 452    SINGLE_TOKENS = {
 453        "(": TokenType.L_PAREN,
 454        ")": TokenType.R_PAREN,
 455        "[": TokenType.L_BRACKET,
 456        "]": TokenType.R_BRACKET,
 457        "{": TokenType.L_BRACE,
 458        "}": TokenType.R_BRACE,
 459        "&": TokenType.AMP,
 460        "^": TokenType.CARET,
 461        ":": TokenType.COLON,
 462        ",": TokenType.COMMA,
 463        ".": TokenType.DOT,
 464        "-": TokenType.DASH,
 465        "=": TokenType.EQ,
 466        ">": TokenType.GT,
 467        "<": TokenType.LT,
 468        "%": TokenType.MOD,
 469        "!": TokenType.NOT,
 470        "|": TokenType.PIPE,
 471        "+": TokenType.PLUS,
 472        ";": TokenType.SEMICOLON,
 473        "/": TokenType.SLASH,
 474        "\\": TokenType.BACKSLASH,
 475        "*": TokenType.STAR,
 476        "~": TokenType.TILDA,
 477        "?": TokenType.PLACEHOLDER,
 478        "@": TokenType.PARAMETER,
 479        # used for breaking a var like x'y' but nothing else
 480        # the token type doesn't matter
 481        "'": TokenType.QUOTE,
 482        "`": TokenType.IDENTIFIER,
 483        '"': TokenType.IDENTIFIER,
 484        "#": TokenType.HASH,
 485    }
 486
 487    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 488    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 489    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 490    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 491    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 492    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 493    IDENTIFIER_ESCAPES = ['"']
 494    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 495    STRING_ESCAPES = ["'"]
 496    VAR_SINGLE_TOKENS: t.Set[str] = set()
 497    ESCAPE_SEQUENCES: t.Dict[str, str] = {}
 498
 499    # Autofilled
 500    IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
 501
 502    _COMMENTS: t.Dict[str, str] = {}
 503    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 504    _IDENTIFIERS: t.Dict[str, str] = {}
 505    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 506    _QUOTES: t.Dict[str, str] = {}
 507    _STRING_ESCAPES: t.Set[str] = set()
 508    _KEYWORD_TRIE: t.Dict = {}
 509
 510    KEYWORDS: t.Dict[str, TokenType] = {
 511        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 512        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 513        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 514        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 515        "/*+": TokenType.HINT,
 516        "==": TokenType.EQ,
 517        "::": TokenType.DCOLON,
 518        "||": TokenType.DPIPE,
 519        ">=": TokenType.GTE,
 520        "<=": TokenType.LTE,
 521        "<>": TokenType.NEQ,
 522        "!=": TokenType.NEQ,
 523        "<=>": TokenType.NULLSAFE_EQ,
 524        "->": TokenType.ARROW,
 525        "->>": TokenType.DARROW,
 526        "=>": TokenType.FARROW,
 527        "#>": TokenType.HASH_ARROW,
 528        "#>>": TokenType.DHASH_ARROW,
 529        "<->": TokenType.LR_ARROW,
 530        "&&": TokenType.DAMP,
 531        "??": TokenType.DQMARK,
 532        "ALL": TokenType.ALL,
 533        "ALWAYS": TokenType.ALWAYS,
 534        "AND": TokenType.AND,
 535        "ANTI": TokenType.ANTI,
 536        "ANY": TokenType.ANY,
 537        "ASC": TokenType.ASC,
 538        "AS": TokenType.ALIAS,
 539        "ASOF": TokenType.ASOF,
 540        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 541        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 542        "BEGIN": TokenType.BEGIN,
 543        "BETWEEN": TokenType.BETWEEN,
 544        "CACHE": TokenType.CACHE,
 545        "UNCACHE": TokenType.UNCACHE,
 546        "CASE": TokenType.CASE,
 547        "CHARACTER SET": TokenType.CHARACTER_SET,
 548        "CLUSTER BY": TokenType.CLUSTER_BY,
 549        "COLLATE": TokenType.COLLATE,
 550        "COLUMN": TokenType.COLUMN,
 551        "COMMIT": TokenType.COMMIT,
 552        "CONNECT BY": TokenType.CONNECT_BY,
 553        "CONSTRAINT": TokenType.CONSTRAINT,
 554        "CREATE": TokenType.CREATE,
 555        "CROSS": TokenType.CROSS,
 556        "CUBE": TokenType.CUBE,
 557        "CURRENT_DATE": TokenType.CURRENT_DATE,
 558        "CURRENT_TIME": TokenType.CURRENT_TIME,
 559        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 560        "CURRENT_USER": TokenType.CURRENT_USER,
 561        "DATABASE": TokenType.DATABASE,
 562        "DEFAULT": TokenType.DEFAULT,
 563        "DELETE": TokenType.DELETE,
 564        "DESC": TokenType.DESC,
 565        "DESCRIBE": TokenType.DESCRIBE,
 566        "DISTINCT": TokenType.DISTINCT,
 567        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 568        "DIV": TokenType.DIV,
 569        "DROP": TokenType.DROP,
 570        "ELSE": TokenType.ELSE,
 571        "END": TokenType.END,
 572        "ESCAPE": TokenType.ESCAPE,
 573        "EXCEPT": TokenType.EXCEPT,
 574        "EXECUTE": TokenType.EXECUTE,
 575        "EXISTS": TokenType.EXISTS,
 576        "FALSE": TokenType.FALSE,
 577        "FETCH": TokenType.FETCH,
 578        "FILTER": TokenType.FILTER,
 579        "FIRST": TokenType.FIRST,
 580        "FULL": TokenType.FULL,
 581        "FUNCTION": TokenType.FUNCTION,
 582        "FOR": TokenType.FOR,
 583        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 584        "FORMAT": TokenType.FORMAT,
 585        "FROM": TokenType.FROM,
 586        "GEOGRAPHY": TokenType.GEOGRAPHY,
 587        "GEOMETRY": TokenType.GEOMETRY,
 588        "GLOB": TokenType.GLOB,
 589        "GROUP BY": TokenType.GROUP_BY,
 590        "GROUPING SETS": TokenType.GROUPING_SETS,
 591        "HAVING": TokenType.HAVING,
 592        "ILIKE": TokenType.ILIKE,
 593        "IN": TokenType.IN,
 594        "INDEX": TokenType.INDEX,
 595        "INET": TokenType.INET,
 596        "INNER": TokenType.INNER,
 597        "INSERT": TokenType.INSERT,
 598        "INTERVAL": TokenType.INTERVAL,
 599        "INTERSECT": TokenType.INTERSECT,
 600        "INTO": TokenType.INTO,
 601        "IS": TokenType.IS,
 602        "ISNULL": TokenType.ISNULL,
 603        "JOIN": TokenType.JOIN,
 604        "KEEP": TokenType.KEEP,
 605        "KILL": TokenType.KILL,
 606        "LATERAL": TokenType.LATERAL,
 607        "LEFT": TokenType.LEFT,
 608        "LIKE": TokenType.LIKE,
 609        "LIMIT": TokenType.LIMIT,
 610        "LOAD": TokenType.LOAD,
 611        "LOCK": TokenType.LOCK,
 612        "MERGE": TokenType.MERGE,
 613        "NATURAL": TokenType.NATURAL,
 614        "NEXT": TokenType.NEXT,
 615        "NOT": TokenType.NOT,
 616        "NOTNULL": TokenType.NOTNULL,
 617        "NULL": TokenType.NULL,
 618        "OBJECT": TokenType.OBJECT,
 619        "OFFSET": TokenType.OFFSET,
 620        "ON": TokenType.ON,
 621        "OR": TokenType.OR,
 622        "XOR": TokenType.XOR,
 623        "ORDER BY": TokenType.ORDER_BY,
 624        "ORDINALITY": TokenType.ORDINALITY,
 625        "OUTER": TokenType.OUTER,
 626        "OVER": TokenType.OVER,
 627        "OVERLAPS": TokenType.OVERLAPS,
 628        "OVERWRITE": TokenType.OVERWRITE,
 629        "PARTITION": TokenType.PARTITION,
 630        "PARTITION BY": TokenType.PARTITION_BY,
 631        "PARTITIONED BY": TokenType.PARTITION_BY,
 632        "PARTITIONED_BY": TokenType.PARTITION_BY,
 633        "PERCENT": TokenType.PERCENT,
 634        "PIVOT": TokenType.PIVOT,
 635        "PRAGMA": TokenType.PRAGMA,
 636        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 637        "PROCEDURE": TokenType.PROCEDURE,
 638        "QUALIFY": TokenType.QUALIFY,
 639        "RANGE": TokenType.RANGE,
 640        "RECURSIVE": TokenType.RECURSIVE,
 641        "REGEXP": TokenType.RLIKE,
 642        "REPLACE": TokenType.REPLACE,
 643        "RETURNING": TokenType.RETURNING,
 644        "REFERENCES": TokenType.REFERENCES,
 645        "RIGHT": TokenType.RIGHT,
 646        "RLIKE": TokenType.RLIKE,
 647        "ROLLBACK": TokenType.ROLLBACK,
 648        "ROLLUP": TokenType.ROLLUP,
 649        "ROW": TokenType.ROW,
 650        "ROWS": TokenType.ROWS,
 651        "SCHEMA": TokenType.SCHEMA,
 652        "SELECT": TokenType.SELECT,
 653        "SEMI": TokenType.SEMI,
 654        "SET": TokenType.SET,
 655        "SETTINGS": TokenType.SETTINGS,
 656        "SHOW": TokenType.SHOW,
 657        "SIMILAR TO": TokenType.SIMILAR_TO,
 658        "SOME": TokenType.SOME,
 659        "SORT BY": TokenType.SORT_BY,
 660        "START WITH": TokenType.START_WITH,
 661        "TABLE": TokenType.TABLE,
 662        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 663        "TEMP": TokenType.TEMPORARY,
 664        "TEMPORARY": TokenType.TEMPORARY,
 665        "THEN": TokenType.THEN,
 666        "TRUE": TokenType.TRUE,
 667        "UNION": TokenType.UNION,
 668        "UNKNOWN": TokenType.UNKNOWN,
 669        "UNNEST": TokenType.UNNEST,
 670        "UNPIVOT": TokenType.UNPIVOT,
 671        "UPDATE": TokenType.UPDATE,
 672        "USE": TokenType.USE,
 673        "USING": TokenType.USING,
 674        "UUID": TokenType.UUID,
 675        "VALUES": TokenType.VALUES,
 676        "VIEW": TokenType.VIEW,
 677        "VOLATILE": TokenType.VOLATILE,
 678        "WHEN": TokenType.WHEN,
 679        "WHERE": TokenType.WHERE,
 680        "WINDOW": TokenType.WINDOW,
 681        "WITH": TokenType.WITH,
 682        "APPLY": TokenType.APPLY,
 683        "ARRAY": TokenType.ARRAY,
 684        "BIT": TokenType.BIT,
 685        "BOOL": TokenType.BOOLEAN,
 686        "BOOLEAN": TokenType.BOOLEAN,
 687        "BYTE": TokenType.TINYINT,
 688        "MEDIUMINT": TokenType.MEDIUMINT,
 689        "TINYINT": TokenType.TINYINT,
 690        "SHORT": TokenType.SMALLINT,
 691        "SMALLINT": TokenType.SMALLINT,
 692        "INT128": TokenType.INT128,
 693        "INT2": TokenType.SMALLINT,
 694        "INTEGER": TokenType.INT,
 695        "INT": TokenType.INT,
 696        "INT4": TokenType.INT,
 697        "LONG": TokenType.BIGINT,
 698        "BIGINT": TokenType.BIGINT,
 699        "INT8": TokenType.BIGINT,
 700        "DEC": TokenType.DECIMAL,
 701        "DECIMAL": TokenType.DECIMAL,
 702        "BIGDECIMAL": TokenType.BIGDECIMAL,
 703        "BIGNUMERIC": TokenType.BIGDECIMAL,
 704        "MAP": TokenType.MAP,
 705        "NULLABLE": TokenType.NULLABLE,
 706        "NUMBER": TokenType.DECIMAL,
 707        "NUMERIC": TokenType.DECIMAL,
 708        "FIXED": TokenType.DECIMAL,
 709        "REAL": TokenType.FLOAT,
 710        "FLOAT": TokenType.FLOAT,
 711        "FLOAT4": TokenType.FLOAT,
 712        "FLOAT8": TokenType.DOUBLE,
 713        "DOUBLE": TokenType.DOUBLE,
 714        "DOUBLE PRECISION": TokenType.DOUBLE,
 715        "JSON": TokenType.JSON,
 716        "CHAR": TokenType.CHAR,
 717        "CHARACTER": TokenType.CHAR,
 718        "NCHAR": TokenType.NCHAR,
 719        "VARCHAR": TokenType.VARCHAR,
 720        "VARCHAR2": TokenType.VARCHAR,
 721        "NVARCHAR": TokenType.NVARCHAR,
 722        "NVARCHAR2": TokenType.NVARCHAR,
 723        "STR": TokenType.TEXT,
 724        "STRING": TokenType.TEXT,
 725        "TEXT": TokenType.TEXT,
 726        "LONGTEXT": TokenType.LONGTEXT,
 727        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 728        "TINYTEXT": TokenType.TINYTEXT,
 729        "CLOB": TokenType.TEXT,
 730        "LONGVARCHAR": TokenType.TEXT,
 731        "BINARY": TokenType.BINARY,
 732        "BLOB": TokenType.VARBINARY,
 733        "LONGBLOB": TokenType.LONGBLOB,
 734        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 735        "TINYBLOB": TokenType.TINYBLOB,
 736        "BYTEA": TokenType.VARBINARY,
 737        "VARBINARY": TokenType.VARBINARY,
 738        "TIME": TokenType.TIME,
 739        "TIMETZ": TokenType.TIMETZ,
 740        "TIMESTAMP": TokenType.TIMESTAMP,
 741        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 742        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 743        "DATE": TokenType.DATE,
 744        "DATETIME": TokenType.DATETIME,
 745        "INT4RANGE": TokenType.INT4RANGE,
 746        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 747        "INT8RANGE": TokenType.INT8RANGE,
 748        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 749        "NUMRANGE": TokenType.NUMRANGE,
 750        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 751        "TSRANGE": TokenType.TSRANGE,
 752        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 753        "TSTZRANGE": TokenType.TSTZRANGE,
 754        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 755        "DATERANGE": TokenType.DATERANGE,
 756        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 757        "UNIQUE": TokenType.UNIQUE,
 758        "STRUCT": TokenType.STRUCT,
 759        "VARIANT": TokenType.VARIANT,
 760        "ALTER": TokenType.ALTER,
 761        "ANALYZE": TokenType.COMMAND,
 762        "CALL": TokenType.COMMAND,
 763        "COMMENT": TokenType.COMMENT,
 764        "COPY": TokenType.COMMAND,
 765        "EXPLAIN": TokenType.COMMAND,
 766        "GRANT": TokenType.COMMAND,
 767        "OPTIMIZE": TokenType.COMMAND,
 768        "PREPARE": TokenType.COMMAND,
 769        "TRUNCATE": TokenType.COMMAND,
 770        "VACUUM": TokenType.COMMAND,
 771        "USER-DEFINED": TokenType.USERDEFINED,
 772        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
 773        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
 774    }
 775
 776    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 777        " ": TokenType.SPACE,
 778        "\t": TokenType.SPACE,
 779        "\n": TokenType.BREAK,
 780        "\r": TokenType.BREAK,
 781        "\r\n": TokenType.BREAK,
 782    }
 783
 784    COMMANDS = {
 785        TokenType.COMMAND,
 786        TokenType.EXECUTE,
 787        TokenType.FETCH,
 788        TokenType.SHOW,
 789    }
 790
 791    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 792
 793    # handle numeric literals like in hive (3L = BIGINT)
 794    NUMERIC_LITERALS: t.Dict[str, str] = {}
 795    ENCODE: t.Optional[str] = None
 796
 797    COMMENTS = ["--", ("/*", "*/")]
 798
 799    __slots__ = (
 800        "sql",
 801        "size",
 802        "tokens",
 803        "_start",
 804        "_current",
 805        "_line",
 806        "_col",
 807        "_comments",
 808        "_char",
 809        "_end",
 810        "_peek",
 811        "_prev_token_line",
 812    )
 813
 814    def __init__(self) -> None:
 815        self.reset()
 816
 817    def reset(self) -> None:
 818        self.sql = ""
 819        self.size = 0
 820        self.tokens: t.List[Token] = []
 821        self._start = 0
 822        self._current = 0
 823        self._line = 1
 824        self._col = 0
 825        self._comments: t.List[str] = []
 826
 827        self._char = ""
 828        self._end = False
 829        self._peek = ""
 830        self._prev_token_line = -1
 831
 832    def tokenize(self, sql: str) -> t.List[Token]:
 833        """Returns a list of tokens corresponding to the SQL string `sql`."""
 834        self.reset()
 835        self.sql = sql
 836        self.size = len(sql)
 837
 838        try:
 839            self._scan()
 840        except Exception as e:
 841            start = max(self._current - 50, 0)
 842            end = min(self._current + 50, self.size - 1)
 843            context = self.sql[start:end]
 844            raise TokenError(f"Error tokenizing '{context}'") from e
 845
 846        return self.tokens
 847
 848    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 849        while self.size and not self._end:
 850            self._start = self._current
 851            self._advance()
 852
 853            if self._char is None:
 854                break
 855
 856            if self._char not in self.WHITE_SPACE:
 857                if self._char.isdigit():
 858                    self._scan_number()
 859                elif self._char in self._IDENTIFIERS:
 860                    self._scan_identifier(self._IDENTIFIERS[self._char])
 861                else:
 862                    self._scan_keywords()
 863
 864            if until and until():
 865                break
 866
 867        if self.tokens and self._comments:
 868            self.tokens[-1].comments.extend(self._comments)
 869
 870    def _chars(self, size: int) -> str:
 871        if size == 1:
 872            return self._char
 873
 874        start = self._current - 1
 875        end = start + size
 876
 877        return self.sql[start:end] if end <= self.size else ""
 878
 879    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 880        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 881            self._col = 1
 882            self._line += 1
 883        else:
 884            self._col += i
 885
 886        self._current += i
 887        self._end = self._current >= self.size
 888        self._char = self.sql[self._current - 1]
 889        self._peek = "" if self._end else self.sql[self._current]
 890
 891        if alnum and self._char.isalnum():
 892            # Here we use local variables instead of attributes for better performance
 893            _col = self._col
 894            _current = self._current
 895            _end = self._end
 896            _peek = self._peek
 897
 898            while _peek.isalnum():
 899                _col += 1
 900                _current += 1
 901                _end = _current >= self.size
 902                _peek = "" if _end else self.sql[_current]
 903
 904            self._col = _col
 905            self._current = _current
 906            self._end = _end
 907            self._peek = _peek
 908            self._char = self.sql[_current - 1]
 909
 910    @property
 911    def _text(self) -> str:
 912        return self.sql[self._start : self._current]
 913
 914    def peek(self, i: int = 0) -> str:
 915        i = self._current + i
 916        if i < self.size:
 917            return self.sql[i]
 918        return ""
 919
 920    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 921        self._prev_token_line = self._line
 922
 923        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
 924            self.tokens[-1].comments.extend(self._comments)
 925            self._comments = []
 926
 927        self.tokens.append(
 928            Token(
 929                token_type,
 930                text=self._text if text is None else text,
 931                line=self._line,
 932                col=self._col,
 933                start=self._start,
 934                end=self._current - 1,
 935                comments=self._comments,
 936            )
 937        )
 938        self._comments = []
 939
 940        # If the command's token starts the statement, or is preceded by a semicolon or BEGIN
 941        # token, everything that follows it (up to the next semicolon) is scanned as one string
 942        if (
 943            token_type in self.COMMANDS
 944            and self._peek != ";"
 945            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 946        ):
 947            start = self._current
 948            tokens = len(self.tokens)
 949            self._scan(lambda: self._peek == ";")
 950            self.tokens = self.tokens[:tokens]
 951            text = self.sql[start : self._current].strip()
 952            if text:
 953                self._add(TokenType.STRING, text)
 954
 955    def _scan_keywords(self) -> None:
 956        size = 0
 957        word = None
 958        chars = self._text
 959        char = chars
 960        prev_space = False
 961        skip = False
 962        trie = self._KEYWORD_TRIE
 963        single_token = char in self.SINGLE_TOKENS
 964
 965        while chars:
 966            if skip:
 967                result = TrieResult.PREFIX
 968            else:
 969                result, trie = in_trie(trie, char.upper())
 970
 971            if result == TrieResult.FAILED:
 972                break
 973            if result == TrieResult.EXISTS:
 974                word = chars
 975
 976            end = self._current + size
 977            size += 1
 978
 979            if end < self.size:
 980                char = self.sql[end]
 981                single_token = single_token or char in self.SINGLE_TOKENS
 982                is_space = char in self.WHITE_SPACE
 983
 984                if not is_space or not prev_space:
 985                    if is_space:
 986                        char = " "
 987                    chars += char
 988                    prev_space = is_space
 989                    skip = False
 990                else:
 991                    skip = True
 992            else:
 993                char = ""
 994                chars = " "
 995
 996        if word:
 997            if self._scan_string(word):
 998                return
 999            if self._scan_comment(word):
1000                return
1001            if prev_space or single_token or not char:
1002                self._advance(size - 1)
1003                word = word.upper()
1004                self._add(self.KEYWORDS[word], text=word)
1005                return
1006
1007        if self._char in self.SINGLE_TOKENS:
1008            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1009            return
1010
1011        self._scan_var()
1012
1013    def _scan_comment(self, comment_start: str) -> bool:
1014        if comment_start not in self._COMMENTS:
1015            return False
1016
1017        comment_start_line = self._line
1018        comment_start_size = len(comment_start)
1019        comment_end = self._COMMENTS[comment_start]
1020
1021        if comment_end:
1022            # Skip the comment's start delimiter
1023            self._advance(comment_start_size)
1024
1025            comment_end_size = len(comment_end)
1026            while not self._end and self._chars(comment_end_size) != comment_end:
1027                self._advance(alnum=True)
1028
1029            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1030            self._advance(comment_end_size - 1)
1031        else:
1032            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1033                self._advance(alnum=True)
1034            self._comments.append(self._text[comment_start_size:])
1035
1036        # A leading comment is attached to the succeeding token, while a trailing comment is attached to the preceding one.
1037        # Multiple consecutive comments are preserved by appending them to the current comments list.
1038        if comment_start_line == self._prev_token_line:
1039            self.tokens[-1].comments.extend(self._comments)
1040            self._comments = []
1041            self._prev_token_line = self._line
1042
1043        return True
1044
1045    def _scan_number(self) -> None:
1046        if self._char == "0":
1047            peek = self._peek.upper()
1048            if peek == "B":
1049                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1050            elif peek == "X":
1051                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1052
1053        decimal = False
1054        scientific = 0
1055
1056        while True:
1057            if self._peek.isdigit():
1058                self._advance()
1059            elif self._peek == "." and not decimal:
1060                after = self.peek(1)
1061                if after.isdigit() or not after.isalpha():
1062                    decimal = True
1063                    self._advance()
1064                else:
1065                    return self._add(TokenType.VAR)
1066            elif self._peek in ("-", "+") and scientific == 1:
1067                scientific += 1
1068                self._advance()
1069            elif self._peek.upper() == "E" and not scientific:
1070                scientific += 1
1071                self._advance()
1072            elif self._peek.isidentifier():
1073                number_text = self._text
1074                literal = ""
1075
1076                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1077                    literal += self._peek.upper()
1078                    self._advance()
1079
1080                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, ""))
1081
1082                if token_type:
1083                    self._add(TokenType.NUMBER, number_text)
1084                    self._add(TokenType.DCOLON, "::")
1085                    return self._add(token_type, literal)
1086                elif self.IDENTIFIERS_CAN_START_WITH_DIGIT:
1087                    return self._add(TokenType.VAR)
1088
1089                self._advance(-len(literal))
1090                return self._add(TokenType.NUMBER, number_text)
1091            else:
1092                return self._add(TokenType.NUMBER)
1093
1094    def _scan_bits(self) -> None:
1095        self._advance()
1096        value = self._extract_value()
1097        try:
1098            # If `value` can't be converted to a binary number, fall back to tokenizing it as an identifier
1099            int(value, 2)
1100            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1101        except ValueError:
1102            self._add(TokenType.IDENTIFIER)
1103
1104    def _scan_hex(self) -> None:
1105        self._advance()
1106        value = self._extract_value()
1107        try:
1108            # If `value` can't be converted to a hex number, fall back to tokenizing it as an identifier
1109            int(value, 16)
1110            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1111        except ValueError:
1112            self._add(TokenType.IDENTIFIER)
1113
1114    def _extract_value(self) -> str:
1115        while True:
1116            char = self._peek.strip()
1117            if char and char not in self.SINGLE_TOKENS:
1118                self._advance(alnum=True)
1119            else:
1120                break
1121
1122        return self._text
1123
1124    def _scan_string(self, start: str) -> bool:
1125        base = None
1126        token_type = TokenType.STRING
1127
1128        if start in self._QUOTES:
1129            end = self._QUOTES[start]
1130        elif start in self._FORMAT_STRINGS:
1131            end, token_type = self._FORMAT_STRINGS[start]
1132
1133            if token_type == TokenType.HEX_STRING:
1134                base = 16
1135            elif token_type == TokenType.BIT_STRING:
1136                base = 2
1137            elif token_type == TokenType.HEREDOC_STRING:
1138                self._advance()
1139                tag = "" if self._char == end else self._extract_string(end)
1140                end = f"{start}{tag}{end}"
1141        else:
1142            return False
1143
1144        self._advance(len(start))
1145        text = self._extract_string(end)
1146
1147        if base:
1148            try:
1149                int(text, base)
1150            except ValueError:
1151                raise TokenError(
1152                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1153                )
1154        else:
1155            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1156
1157        self._add(token_type, text)
1158        return True
1159
1160    def _scan_identifier(self, identifier_end: str) -> None:
1161        self._advance()
1162        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1163        self._add(TokenType.IDENTIFIER, text)
1164
1165    def _scan_var(self) -> None:
1166        while True:
1167            char = self._peek.strip()
1168            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1169                self._advance(alnum=True)
1170            else:
1171                break
1172
1173        self._add(
1174            TokenType.VAR
1175            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1176            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1177        )
1178
1179    def _extract_string(self, delimiter: str, escapes=None) -> str:
1180        text = ""
1181        delim_size = len(delimiter)
1182        escapes = self._STRING_ESCAPES if escapes is None else escapes
1183
1184        while True:
1185            if (
1186                self._char in escapes
1187                and (self._peek == delimiter or self._peek in escapes)
1188                and (self._char not in self._QUOTES or self._char == self._peek)
1189            ):
1190                if self._peek == delimiter:
1191                    text += self._peek
1192                else:
1193                    text += self._char + self._peek
1194
1195                if self._current + 1 < self.size:
1196                    self._advance(2)
1197                else:
1198                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1199            else:
1200                if self._chars(delim_size) == delimiter:
1201                    if delim_size > 1:
1202                        self._advance(delim_size - 1)
1203                    break
1204
1205                if self._end:
1206                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1207
1208                if self.ESCAPE_SEQUENCES and self._peek and self._char in self.STRING_ESCAPES:
1209                    escaped_sequence = self.ESCAPE_SEQUENCES.get(self._char + self._peek)
1210                    if escaped_sequence:
1211                        self._advance(2)
1212                        text += escaped_sequence
1213                        continue
1214
1215                current = self._current - 1
1216                self._advance(alnum=True)
1217                text += self.sql[current : self._current - 1]
1218
1219        return text
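
Dialect tokenizers customize this base class by subclassing it; the `_Tokenizer` metaclass then rebuilds the derived lookup tables (`_QUOTES`, `_IDENTIFIERS`, `_FORMAT_STRINGS`, the keyword trie) at class-creation time. A hypothetical subclass, for illustration only:

    from sqlglot.tokens import Tokenizer, TokenType

    class MyTokenizer(Tokenizer):  # hypothetical, not a real sqlglot dialect
        # Accept backticks as identifier delimiters alongside double quotes.
        IDENTIFIERS = ['"', "`"]
        # Extend the keyword table; multi-word keys are folded into the trie.
        KEYWORDS = {
            **Tokenizer.KEYWORDS,
            "TOP": TokenType.TOP,
        }

    # The metaclass has already derived the internal tables:
    print(MyTokenizer._IDENTIFIERS)  # {'"': '"', '`': '`'}
    print([tok.token_type for tok in MyTokenizer().tokenize("SELECT `x` FROM t")])
    # [SELECT, IDENTIFIER, FROM, VAR]

Note also the command capture described in `_add` above: when a token in `COMMANDS` (e.g. EXPLAIN, SHOW) starts a statement, everything up to the next semicolon is emitted as a single STRING token:

    print([(tok.token_type, tok.text) for tok in Tokenizer().tokenize("EXPLAIN SELECT 1")])
    # [(COMMAND, 'EXPLAIN'), (STRING, 'SELECT 1')]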
class TokenType(sqlglot.helper.AutoName):

An enumeration of the token types produced by the tokenizer.

OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUTER = <TokenType.OUTER: 'OUTER'>
OVER = <TokenType.OVER: 'OVER'>
OVERLAPS = <TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRAGMA = <TokenType.PRAGMA: 'PRAGMA'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SETTINGS = <TokenType.SETTINGS: 'SETTINGS'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
SORT_BY = <TokenType.SORT_BY: 'SORT_BY'>
START_WITH = <TokenType.START_WITH: 'START_WITH'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRUE = <TokenType.TRUE: 'TRUE'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VIEW = <TokenType.VIEW: 'VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
VERSION_SNAPSHOT = <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>
TIMESTAMP_SNAPSHOT = <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>
Inherited Members
enum.Enum
name
value
class Token:
341class Token:
342    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
343
344    @classmethod
345    def number(cls, number: int) -> Token:
346        """Returns a NUMBER token with `number` as its text."""
347        return cls(TokenType.NUMBER, str(number))
348
349    @classmethod
350    def string(cls, string: str) -> Token:
351        """Returns a STRING token with `string` as its text."""
352        return cls(TokenType.STRING, string)
353
354    @classmethod
355    def identifier(cls, identifier: str) -> Token:
356        """Returns an IDENTIFIER token with `identifier` as its text."""
357        return cls(TokenType.IDENTIFIER, identifier)
358
359    @classmethod
360    def var(cls, var: str) -> Token:
361        """Returns a VAR token with `var` as its text."""
362        return cls(TokenType.VAR, var)
363
364    def __init__(
365        self,
366        token_type: TokenType,
367        text: str,
368        line: int = 1,
369        col: int = 1,
370        start: int = 0,
371        end: int = 0,
372        comments: t.List[str] = [],
373    ) -> None:
374        """Token initializer.
375
376        Args:
377            token_type: The TokenType Enum.
378            text: The text of the token.
379            line: The line that the token ends on.
380            col: The column that the token ends on.
381            start: The start index of the token.
382            end: The ending index of the token.
383            comments: The comments to attach to the token.
384        """
385        self.token_type = token_type
386        self.text = text
387        self.line = line
388        self.col = col
389        self.start = start
390        self.end = end
391        self.comments = comments
392
393    def __repr__(self) -> str:
394        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
395        return f"<Token {attributes}>"
Token( token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: List[str] = [])
364    def __init__(
365        self,
366        token_type: TokenType,
367        text: str,
368        line: int = 1,
369        col: int = 1,
370        start: int = 0,
371        end: int = 0,
372        comments: t.List[str] = [],
373    ) -> None:
374        """Token initializer.
375
376        Args:
377            token_type: The TokenType Enum.
378            text: The text of the token.
379            line: The line that the token ends on.
380            col: The column that the token ends on.
381            start: The start index of the token.
382            end: The ending index of the token.
383            comments: The comments to attach to the token.
384        """
385        self.token_type = token_type
386        self.text = text
387        self.line = line
388        self.col = col
389        self.start = start
390        self.end = end
391        self.comments = comments

Token initializer.

Arguments:
  • token_type: The TokenType Enum.
  • text: The text of the token.
  • line: The line that the token ends on.
  • col: The column that the token ends on.
  • start: The start index of the token.
  • end: The ending index of the token.
  • comments: The comments to attach to the token.
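As a minimal illustration (not part of the library's shipped examples), a token can be constructed directly with its positional metadata, which is stored as-is on the listed attributes:

>>> from sqlglot.tokens import Token, TokenType
>>> tok = Token(TokenType.SELECT, "SELECT", line=1, col=6, start=0, end=5)
>>> (tok.text, tok.line, tok.col, tok.start, tok.end)
('SELECT', 1, 6, 0, 5)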
@classmethod
def number(cls, number: int) -> Token:
344    @classmethod
345    def number(cls, number: int) -> Token:
346        """Returns a NUMBER token with `number` as its text."""
347        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> Token:
349    @classmethod
350    def string(cls, string: str) -> Token:
351        """Returns a STRING token with `string` as its text."""
352        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> Token:
354    @classmethod
355    def identifier(cls, identifier: str) -> Token:
356        """Returns an IDENTIFIER token with `identifier` as its text."""
357        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> Token:
359    @classmethod
360    def var(cls, var: str) -> Token:
361        """Returns a VAR token with `var` as its text."""
362        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.

token_type
text
line
col
start
end
comments
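A short sketch of the factory classmethods above; the results follow directly from the constructors shown:

>>> from sqlglot.tokens import Token, TokenType
>>> Token.number(42).token_type is TokenType.NUMBER
True
>>> Token.number(42).text
'42'
>>> Token.identifier("foo").token_type is TokenType.IDENTIFIER
True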
class Tokenizer:
 452class Tokenizer(metaclass=_Tokenizer):
 453    SINGLE_TOKENS = {
 454        "(": TokenType.L_PAREN,
 455        ")": TokenType.R_PAREN,
 456        "[": TokenType.L_BRACKET,
 457        "]": TokenType.R_BRACKET,
 458        "{": TokenType.L_BRACE,
 459        "}": TokenType.R_BRACE,
 460        "&": TokenType.AMP,
 461        "^": TokenType.CARET,
 462        ":": TokenType.COLON,
 463        ",": TokenType.COMMA,
 464        ".": TokenType.DOT,
 465        "-": TokenType.DASH,
 466        "=": TokenType.EQ,
 467        ">": TokenType.GT,
 468        "<": TokenType.LT,
 469        "%": TokenType.MOD,
 470        "!": TokenType.NOT,
 471        "|": TokenType.PIPE,
 472        "+": TokenType.PLUS,
 473        ";": TokenType.SEMICOLON,
 474        "/": TokenType.SLASH,
 475        "\\": TokenType.BACKSLASH,
 476        "*": TokenType.STAR,
 477        "~": TokenType.TILDA,
 478        "?": TokenType.PLACEHOLDER,
 479        "@": TokenType.PARAMETER,
 480        # used for breaking a var like x'y' but nothing else
 481        # the token type doesn't matter
 482        "'": TokenType.QUOTE,
 483        "`": TokenType.IDENTIFIER,
 484        '"': TokenType.IDENTIFIER,
 485        "#": TokenType.HASH,
 486    }
 487
 488    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 489    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 490    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 491    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 492    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 493    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 494    IDENTIFIER_ESCAPES = ['"']
 495    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 496    STRING_ESCAPES = ["'"]
 497    VAR_SINGLE_TOKENS: t.Set[str] = set()
 498    ESCAPE_SEQUENCES: t.Dict[str, str] = {}
 499
 500    # Autofilled
 501    IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
 502
 503    _COMMENTS: t.Dict[str, str] = {}
 504    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 505    _IDENTIFIERS: t.Dict[str, str] = {}
 506    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 507    _QUOTES: t.Dict[str, str] = {}
 508    _STRING_ESCAPES: t.Set[str] = set()
 509    _KEYWORD_TRIE: t.Dict = {}
 510
 511    KEYWORDS: t.Dict[str, TokenType] = {
 512        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 513        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 514        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 515        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 516        "/*+": TokenType.HINT,
 517        "==": TokenType.EQ,
 518        "::": TokenType.DCOLON,
 519        "||": TokenType.DPIPE,
 520        ">=": TokenType.GTE,
 521        "<=": TokenType.LTE,
 522        "<>": TokenType.NEQ,
 523        "!=": TokenType.NEQ,
 524        "<=>": TokenType.NULLSAFE_EQ,
 525        "->": TokenType.ARROW,
 526        "->>": TokenType.DARROW,
 527        "=>": TokenType.FARROW,
 528        "#>": TokenType.HASH_ARROW,
 529        "#>>": TokenType.DHASH_ARROW,
 530        "<->": TokenType.LR_ARROW,
 531        "&&": TokenType.DAMP,
 532        "??": TokenType.DQMARK,
 533        "ALL": TokenType.ALL,
 534        "ALWAYS": TokenType.ALWAYS,
 535        "AND": TokenType.AND,
 536        "ANTI": TokenType.ANTI,
 537        "ANY": TokenType.ANY,
 538        "ASC": TokenType.ASC,
 539        "AS": TokenType.ALIAS,
 540        "ASOF": TokenType.ASOF,
 541        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 542        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 543        "BEGIN": TokenType.BEGIN,
 544        "BETWEEN": TokenType.BETWEEN,
 545        "CACHE": TokenType.CACHE,
 546        "UNCACHE": TokenType.UNCACHE,
 547        "CASE": TokenType.CASE,
 548        "CHARACTER SET": TokenType.CHARACTER_SET,
 549        "CLUSTER BY": TokenType.CLUSTER_BY,
 550        "COLLATE": TokenType.COLLATE,
 551        "COLUMN": TokenType.COLUMN,
 552        "COMMIT": TokenType.COMMIT,
 553        "CONNECT BY": TokenType.CONNECT_BY,
 554        "CONSTRAINT": TokenType.CONSTRAINT,
 555        "CREATE": TokenType.CREATE,
 556        "CROSS": TokenType.CROSS,
 557        "CUBE": TokenType.CUBE,
 558        "CURRENT_DATE": TokenType.CURRENT_DATE,
 559        "CURRENT_TIME": TokenType.CURRENT_TIME,
 560        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 561        "CURRENT_USER": TokenType.CURRENT_USER,
 562        "DATABASE": TokenType.DATABASE,
 563        "DEFAULT": TokenType.DEFAULT,
 564        "DELETE": TokenType.DELETE,
 565        "DESC": TokenType.DESC,
 566        "DESCRIBE": TokenType.DESCRIBE,
 567        "DISTINCT": TokenType.DISTINCT,
 568        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 569        "DIV": TokenType.DIV,
 570        "DROP": TokenType.DROP,
 571        "ELSE": TokenType.ELSE,
 572        "END": TokenType.END,
 573        "ESCAPE": TokenType.ESCAPE,
 574        "EXCEPT": TokenType.EXCEPT,
 575        "EXECUTE": TokenType.EXECUTE,
 576        "EXISTS": TokenType.EXISTS,
 577        "FALSE": TokenType.FALSE,
 578        "FETCH": TokenType.FETCH,
 579        "FILTER": TokenType.FILTER,
 580        "FIRST": TokenType.FIRST,
 581        "FULL": TokenType.FULL,
 582        "FUNCTION": TokenType.FUNCTION,
 583        "FOR": TokenType.FOR,
 584        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 585        "FORMAT": TokenType.FORMAT,
 586        "FROM": TokenType.FROM,
 587        "GEOGRAPHY": TokenType.GEOGRAPHY,
 588        "GEOMETRY": TokenType.GEOMETRY,
 589        "GLOB": TokenType.GLOB,
 590        "GROUP BY": TokenType.GROUP_BY,
 591        "GROUPING SETS": TokenType.GROUPING_SETS,
 592        "HAVING": TokenType.HAVING,
 593        "ILIKE": TokenType.ILIKE,
 594        "IN": TokenType.IN,
 595        "INDEX": TokenType.INDEX,
 596        "INET": TokenType.INET,
 597        "INNER": TokenType.INNER,
 598        "INSERT": TokenType.INSERT,
 599        "INTERVAL": TokenType.INTERVAL,
 600        "INTERSECT": TokenType.INTERSECT,
 601        "INTO": TokenType.INTO,
 602        "IS": TokenType.IS,
 603        "ISNULL": TokenType.ISNULL,
 604        "JOIN": TokenType.JOIN,
 605        "KEEP": TokenType.KEEP,
 606        "KILL": TokenType.KILL,
 607        "LATERAL": TokenType.LATERAL,
 608        "LEFT": TokenType.LEFT,
 609        "LIKE": TokenType.LIKE,
 610        "LIMIT": TokenType.LIMIT,
 611        "LOAD": TokenType.LOAD,
 612        "LOCK": TokenType.LOCK,
 613        "MERGE": TokenType.MERGE,
 614        "NATURAL": TokenType.NATURAL,
 615        "NEXT": TokenType.NEXT,
 616        "NOT": TokenType.NOT,
 617        "NOTNULL": TokenType.NOTNULL,
 618        "NULL": TokenType.NULL,
 619        "OBJECT": TokenType.OBJECT,
 620        "OFFSET": TokenType.OFFSET,
 621        "ON": TokenType.ON,
 622        "OR": TokenType.OR,
 623        "XOR": TokenType.XOR,
 624        "ORDER BY": TokenType.ORDER_BY,
 625        "ORDINALITY": TokenType.ORDINALITY,
 626        "OUTER": TokenType.OUTER,
 627        "OVER": TokenType.OVER,
 628        "OVERLAPS": TokenType.OVERLAPS,
 629        "OVERWRITE": TokenType.OVERWRITE,
 630        "PARTITION": TokenType.PARTITION,
 631        "PARTITION BY": TokenType.PARTITION_BY,
 632        "PARTITIONED BY": TokenType.PARTITION_BY,
 633        "PARTITIONED_BY": TokenType.PARTITION_BY,
 634        "PERCENT": TokenType.PERCENT,
 635        "PIVOT": TokenType.PIVOT,
 636        "PRAGMA": TokenType.PRAGMA,
 637        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 638        "PROCEDURE": TokenType.PROCEDURE,
 639        "QUALIFY": TokenType.QUALIFY,
 640        "RANGE": TokenType.RANGE,
 641        "RECURSIVE": TokenType.RECURSIVE,
 642        "REGEXP": TokenType.RLIKE,
 643        "REPLACE": TokenType.REPLACE,
 644        "RETURNING": TokenType.RETURNING,
 645        "REFERENCES": TokenType.REFERENCES,
 646        "RIGHT": TokenType.RIGHT,
 647        "RLIKE": TokenType.RLIKE,
 648        "ROLLBACK": TokenType.ROLLBACK,
 649        "ROLLUP": TokenType.ROLLUP,
 650        "ROW": TokenType.ROW,
 651        "ROWS": TokenType.ROWS,
 652        "SCHEMA": TokenType.SCHEMA,
 653        "SELECT": TokenType.SELECT,
 654        "SEMI": TokenType.SEMI,
 655        "SET": TokenType.SET,
 656        "SETTINGS": TokenType.SETTINGS,
 657        "SHOW": TokenType.SHOW,
 658        "SIMILAR TO": TokenType.SIMILAR_TO,
 659        "SOME": TokenType.SOME,
 660        "SORT BY": TokenType.SORT_BY,
 661        "START WITH": TokenType.START_WITH,
 662        "TABLE": TokenType.TABLE,
 663        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 664        "TEMP": TokenType.TEMPORARY,
 665        "TEMPORARY": TokenType.TEMPORARY,
 666        "THEN": TokenType.THEN,
 667        "TRUE": TokenType.TRUE,
 668        "UNION": TokenType.UNION,
 669        "UNKNOWN": TokenType.UNKNOWN,
 670        "UNNEST": TokenType.UNNEST,
 671        "UNPIVOT": TokenType.UNPIVOT,
 672        "UPDATE": TokenType.UPDATE,
 673        "USE": TokenType.USE,
 674        "USING": TokenType.USING,
 675        "UUID": TokenType.UUID,
 676        "VALUES": TokenType.VALUES,
 677        "VIEW": TokenType.VIEW,
 678        "VOLATILE": TokenType.VOLATILE,
 679        "WHEN": TokenType.WHEN,
 680        "WHERE": TokenType.WHERE,
 681        "WINDOW": TokenType.WINDOW,
 682        "WITH": TokenType.WITH,
 683        "APPLY": TokenType.APPLY,
 684        "ARRAY": TokenType.ARRAY,
 685        "BIT": TokenType.BIT,
 686        "BOOL": TokenType.BOOLEAN,
 687        "BOOLEAN": TokenType.BOOLEAN,
 688        "BYTE": TokenType.TINYINT,
 689        "MEDIUMINT": TokenType.MEDIUMINT,
 690        "TINYINT": TokenType.TINYINT,
 691        "SHORT": TokenType.SMALLINT,
 692        "SMALLINT": TokenType.SMALLINT,
 693        "INT128": TokenType.INT128,
 694        "INT2": TokenType.SMALLINT,
 695        "INTEGER": TokenType.INT,
 696        "INT": TokenType.INT,
 697        "INT4": TokenType.INT,
 698        "LONG": TokenType.BIGINT,
 699        "BIGINT": TokenType.BIGINT,
 700        "INT8": TokenType.BIGINT,
 701        "DEC": TokenType.DECIMAL,
 702        "DECIMAL": TokenType.DECIMAL,
 703        "BIGDECIMAL": TokenType.BIGDECIMAL,
 704        "BIGNUMERIC": TokenType.BIGDECIMAL,
 705        "MAP": TokenType.MAP,
 706        "NULLABLE": TokenType.NULLABLE,
 707        "NUMBER": TokenType.DECIMAL,
 708        "NUMERIC": TokenType.DECIMAL,
 709        "FIXED": TokenType.DECIMAL,
 710        "REAL": TokenType.FLOAT,
 711        "FLOAT": TokenType.FLOAT,
 712        "FLOAT4": TokenType.FLOAT,
 713        "FLOAT8": TokenType.DOUBLE,
 714        "DOUBLE": TokenType.DOUBLE,
 715        "DOUBLE PRECISION": TokenType.DOUBLE,
 716        "JSON": TokenType.JSON,
 717        "CHAR": TokenType.CHAR,
 718        "CHARACTER": TokenType.CHAR,
 719        "NCHAR": TokenType.NCHAR,
 720        "VARCHAR": TokenType.VARCHAR,
 721        "VARCHAR2": TokenType.VARCHAR,
 722        "NVARCHAR": TokenType.NVARCHAR,
 723        "NVARCHAR2": TokenType.NVARCHAR,
 724        "STR": TokenType.TEXT,
 725        "STRING": TokenType.TEXT,
 726        "TEXT": TokenType.TEXT,
 727        "LONGTEXT": TokenType.LONGTEXT,
 728        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 729        "TINYTEXT": TokenType.TINYTEXT,
 730        "CLOB": TokenType.TEXT,
 731        "LONGVARCHAR": TokenType.TEXT,
 732        "BINARY": TokenType.BINARY,
 733        "BLOB": TokenType.VARBINARY,
 734        "LONGBLOB": TokenType.LONGBLOB,
 735        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 736        "TINYBLOB": TokenType.TINYBLOB,
 737        "BYTEA": TokenType.VARBINARY,
 738        "VARBINARY": TokenType.VARBINARY,
 739        "TIME": TokenType.TIME,
 740        "TIMETZ": TokenType.TIMETZ,
 741        "TIMESTAMP": TokenType.TIMESTAMP,
 742        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 743        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 744        "DATE": TokenType.DATE,
 745        "DATETIME": TokenType.DATETIME,
 746        "INT4RANGE": TokenType.INT4RANGE,
 747        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 748        "INT8RANGE": TokenType.INT8RANGE,
 749        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 750        "NUMRANGE": TokenType.NUMRANGE,
 751        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 752        "TSRANGE": TokenType.TSRANGE,
 753        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 754        "TSTZRANGE": TokenType.TSTZRANGE,
 755        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 756        "DATERANGE": TokenType.DATERANGE,
 757        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 758        "UNIQUE": TokenType.UNIQUE,
 759        "STRUCT": TokenType.STRUCT,
 760        "VARIANT": TokenType.VARIANT,
 761        "ALTER": TokenType.ALTER,
 762        "ANALYZE": TokenType.COMMAND,
 763        "CALL": TokenType.COMMAND,
 764        "COMMENT": TokenType.COMMENT,
 765        "COPY": TokenType.COMMAND,
 766        "EXPLAIN": TokenType.COMMAND,
 767        "GRANT": TokenType.COMMAND,
 768        "OPTIMIZE": TokenType.COMMAND,
 769        "PREPARE": TokenType.COMMAND,
 770        "TRUNCATE": TokenType.COMMAND,
 771        "VACUUM": TokenType.COMMAND,
 772        "USER-DEFINED": TokenType.USERDEFINED,
 773        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
 774        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
 775    }
 776
 777    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 778        " ": TokenType.SPACE,
 779        "\t": TokenType.SPACE,
 780        "\n": TokenType.BREAK,
 781        "\r": TokenType.BREAK,
 782        "\r\n": TokenType.BREAK,
 783    }
 784
 785    COMMANDS = {
 786        TokenType.COMMAND,
 787        TokenType.EXECUTE,
 788        TokenType.FETCH,
 789        TokenType.SHOW,
 790    }
 791
 792    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 793
 794    # handle numeric literals like in hive (3L = BIGINT)
 795    NUMERIC_LITERALS: t.Dict[str, str] = {}
 796    ENCODE: t.Optional[str] = None
 797
 798    COMMENTS = ["--", ("/*", "*/")]
 799
 800    __slots__ = (
 801        "sql",
 802        "size",
 803        "tokens",
 804        "_start",
 805        "_current",
 806        "_line",
 807        "_col",
 808        "_comments",
 809        "_char",
 810        "_end",
 811        "_peek",
 812        "_prev_token_line",
 813    )
 814
 815    def __init__(self) -> None:
 816        self.reset()
 817
 818    def reset(self) -> None:
 819        self.sql = ""
 820        self.size = 0
 821        self.tokens: t.List[Token] = []
 822        self._start = 0
 823        self._current = 0
 824        self._line = 1
 825        self._col = 0
 826        self._comments: t.List[str] = []
 827
 828        self._char = ""
 829        self._end = False
 830        self._peek = ""
 831        self._prev_token_line = -1
 832
 833    def tokenize(self, sql: str) -> t.List[Token]:
 834        """Returns a list of tokens corresponding to the SQL string `sql`."""
 835        self.reset()
 836        self.sql = sql
 837        self.size = len(sql)
 838
 839        try:
 840            self._scan()
 841        except Exception as e:
 842            start = max(self._current - 50, 0)
 843            end = min(self._current + 50, self.size - 1)
 844            context = self.sql[start:end]
 845            raise TokenError(f"Error tokenizing '{context}'") from e
 846
 847        return self.tokens
 848
 849    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 850        while self.size and not self._end:
 851            self._start = self._current
 852            self._advance()
 853
 854            if self._char is None:
 855                break
 856
 857            if self._char not in self.WHITE_SPACE:
 858                if self._char.isdigit():
 859                    self._scan_number()
 860                elif self._char in self._IDENTIFIERS:
 861                    self._scan_identifier(self._IDENTIFIERS[self._char])
 862                else:
 863                    self._scan_keywords()
 864
 865            if until and until():
 866                break
 867
 868        if self.tokens and self._comments:
 869            self.tokens[-1].comments.extend(self._comments)
 870
 871    def _chars(self, size: int) -> str:
 872        if size == 1:
 873            return self._char
 874
 875        start = self._current - 1
 876        end = start + size
 877
 878        return self.sql[start:end] if end <= self.size else ""
 879
 880    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 881        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 882            self._col = 1
 883            self._line += 1
 884        else:
 885            self._col += i
 886
 887        self._current += i
 888        self._end = self._current >= self.size
 889        self._char = self.sql[self._current - 1]
 890        self._peek = "" if self._end else self.sql[self._current]
 891
 892        if alnum and self._char.isalnum():
 893            # Here we use local variables instead of attributes for better performance
 894            _col = self._col
 895            _current = self._current
 896            _end = self._end
 897            _peek = self._peek
 898
 899            while _peek.isalnum():
 900                _col += 1
 901                _current += 1
 902                _end = _current >= self.size
 903                _peek = "" if _end else self.sql[_current]
 904
 905            self._col = _col
 906            self._current = _current
 907            self._end = _end
 908            self._peek = _peek
 909            self._char = self.sql[_current - 1]
 910
 911    @property
 912    def _text(self) -> str:
 913        return self.sql[self._start : self._current]
 914
 915    def peek(self, i: int = 0) -> str:
 916        i = self._current + i
 917        if i < self.size:
 918            return self.sql[i]
 919        return ""
 920
 921    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 922        self._prev_token_line = self._line
 923
 924        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
 925            self.tokens[-1].comments.extend(self._comments)
 926            self._comments = []
 927
 928        self.tokens.append(
 929            Token(
 930                token_type,
 931                text=self._text if text is None else text,
 932                line=self._line,
 933                col=self._col,
 934                start=self._start,
 935                end=self._current - 1,
 936                comments=self._comments,
 937            )
 938        )
 939        self._comments = []
 940
 941        # If we have either a semicolon or a begin token before the command's token, we'll parse
 942        # whatever follows the command's token as a string
 943        if (
 944            token_type in self.COMMANDS
 945            and self._peek != ";"
 946            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 947        ):
 948            start = self._current
 949            tokens = len(self.tokens)
 950            self._scan(lambda: self._peek == ";")
 951            self.tokens = self.tokens[:tokens]
 952            text = self.sql[start : self._current].strip()
 953            if text:
 954                self._add(TokenType.STRING, text)
 955
 956    def _scan_keywords(self) -> None:
 957        size = 0
 958        word = None
 959        chars = self._text
 960        char = chars
 961        prev_space = False
 962        skip = False
 963        trie = self._KEYWORD_TRIE
 964        single_token = char in self.SINGLE_TOKENS
 965
 966        while chars:
 967            if skip:
 968                result = TrieResult.PREFIX
 969            else:
 970                result, trie = in_trie(trie, char.upper())
 971
 972            if result == TrieResult.FAILED:
 973                break
 974            if result == TrieResult.EXISTS:
 975                word = chars
 976
 977            end = self._current + size
 978            size += 1
 979
 980            if end < self.size:
 981                char = self.sql[end]
 982                single_token = single_token or char in self.SINGLE_TOKENS
 983                is_space = char in self.WHITE_SPACE
 984
 985                if not is_space or not prev_space:
 986                    if is_space:
 987                        char = " "
 988                    chars += char
 989                    prev_space = is_space
 990                    skip = False
 991                else:
 992                    skip = True
 993            else:
 994                char = ""
 995                chars = " "
 996
 997        if word:
 998            if self._scan_string(word):
 999                return
1000            if self._scan_comment(word):
1001                return
1002            if prev_space or single_token or not char:
1003                self._advance(size - 1)
1004                word = word.upper()
1005                self._add(self.KEYWORDS[word], text=word)
1006                return
1007
1008        if self._char in self.SINGLE_TOKENS:
1009            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1010            return
1011
1012        self._scan_var()
1013
1014    def _scan_comment(self, comment_start: str) -> bool:
1015        if comment_start not in self._COMMENTS:
1016            return False
1017
1018        comment_start_line = self._line
1019        comment_start_size = len(comment_start)
1020        comment_end = self._COMMENTS[comment_start]
1021
1022        if comment_end:
1023            # Skip the comment's start delimiter
1024            self._advance(comment_start_size)
1025
1026            comment_end_size = len(comment_end)
1027            while not self._end and self._chars(comment_end_size) != comment_end:
1028                self._advance(alnum=True)
1029
1030            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1031            self._advance(comment_end_size - 1)
1032        else:
1033            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
1034                self._advance(alnum=True)
1035            self._comments.append(self._text[comment_start_size:])
1036
1037        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
1038        # Multiple consecutive comments are preserved by appending them to the current comments list.
1039        if comment_start_line == self._prev_token_line:
1040            self.tokens[-1].comments.extend(self._comments)
1041            self._comments = []
1042            self._prev_token_line = self._line
1043
1044        return True
1045
1046    def _scan_number(self) -> None:
1047        if self._char == "0":
1048            peek = self._peek.upper()
1049            if peek == "B":
1050                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1051            elif peek == "X":
1052                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1053
1054        decimal = False
1055        scientific = 0
1056
1057        while True:
1058            if self._peek.isdigit():
1059                self._advance()
1060            elif self._peek == "." and not decimal:
1061                after = self.peek(1)
1062                if after.isdigit() or not after.isalpha():
1063                    decimal = True
1064                    self._advance()
1065                else:
1066                    return self._add(TokenType.VAR)
1067            elif self._peek in ("-", "+") and scientific == 1:
1068                scientific += 1
1069                self._advance()
1070            elif self._peek.upper() == "E" and not scientific:
1071                scientific += 1
1072                self._advance()
1073            elif self._peek.isidentifier():
1074                number_text = self._text
1075                literal = ""
1076
1077                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1078                    literal += self._peek.upper()
1079                    self._advance()
1080
1081                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, ""))
1082
1083                if token_type:
1084                    self._add(TokenType.NUMBER, number_text)
1085                    self._add(TokenType.DCOLON, "::")
1086                    return self._add(token_type, literal)
1087                elif self.IDENTIFIERS_CAN_START_WITH_DIGIT:
1088                    return self._add(TokenType.VAR)
1089
1090                self._advance(-len(literal))
1091                return self._add(TokenType.NUMBER, number_text)
1092            else:
1093                return self._add(TokenType.NUMBER)
1094
1095    def _scan_bits(self) -> None:
1096        self._advance()
1097        value = self._extract_value()
1098        try:
1099            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
1100            int(value, 2)
1101            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1102        except ValueError:
1103            self._add(TokenType.IDENTIFIER)
1104
1105    def _scan_hex(self) -> None:
1106        self._advance()
1107        value = self._extract_value()
1108        try:
1109            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
1110            int(value, 16)
1111            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1112        except ValueError:
1113            self._add(TokenType.IDENTIFIER)
1114
1115    def _extract_value(self) -> str:
1116        while True:
1117            char = self._peek.strip()
1118            if char and char not in self.SINGLE_TOKENS:
1119                self._advance(alnum=True)
1120            else:
1121                break
1122
1123        return self._text
1124
1125    def _scan_string(self, start: str) -> bool:
1126        base = None
1127        token_type = TokenType.STRING
1128
1129        if start in self._QUOTES:
1130            end = self._QUOTES[start]
1131        elif start in self._FORMAT_STRINGS:
1132            end, token_type = self._FORMAT_STRINGS[start]
1133
1134            if token_type == TokenType.HEX_STRING:
1135                base = 16
1136            elif token_type == TokenType.BIT_STRING:
1137                base = 2
1138            elif token_type == TokenType.HEREDOC_STRING:
1139                self._advance()
1140                tag = "" if self._char == end else self._extract_string(end)
1141                end = f"{start}{tag}{end}"
1142        else:
1143            return False
1144
1145        self._advance(len(start))
1146        text = self._extract_string(end)
1147
1148        if base:
1149            try:
1150                int(text, base)
1151            except:
1152                raise TokenError(
1153                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1154                )
1155        else:
1156            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1157
1158        self._add(token_type, text)
1159        return True
1160
1161    def _scan_identifier(self, identifier_end: str) -> None:
1162        self._advance()
1163        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1164        self._add(TokenType.IDENTIFIER, text)
1165
1166    def _scan_var(self) -> None:
1167        while True:
1168            char = self._peek.strip()
1169            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1170                self._advance(alnum=True)
1171            else:
1172                break
1173
1174        self._add(
1175            TokenType.VAR
1176            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1177            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1178        )
1179
1180    def _extract_string(self, delimiter: str, escapes=None) -> str:
1181        text = ""
1182        delim_size = len(delimiter)
1183        escapes = self._STRING_ESCAPES if escapes is None else escapes
1184
1185        while True:
1186            if (
1187                self._char in escapes
1188                and (self._peek == delimiter or self._peek in escapes)
1189                and (self._char not in self._QUOTES or self._char == self._peek)
1190            ):
1191                if self._peek == delimiter:
1192                    text += self._peek
1193                else:
1194                    text += self._char + self._peek
1195
1196                if self._current + 1 < self.size:
1197                    self._advance(2)
1198                else:
1199                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1200            else:
1201                if self._chars(delim_size) == delimiter:
1202                    if delim_size > 1:
1203                        self._advance(delim_size - 1)
1204                    break
1205
1206                if self._end:
1207                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1208
1209                if self.ESCAPE_SEQUENCES and self._peek and self._char in self.STRING_ESCAPES:
1210                    escaped_sequence = self.ESCAPE_SEQUENCES.get(self._char + self._peek)
1211                    if escaped_sequence:
1212                        self._advance(2)
1213                        text += escaped_sequence
1214                        continue
1215
1216                current = self._current - 1
1217                self._advance(alnum=True)
1218                text += self.sql[current : self._current - 1]
1219
1220        return text
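Because KEYWORDS is compiled into a trie (_KEYWORD_TRIE), _scan_keywords can match multi-word keywords such as GROUP BY or PRIMARY KEY as single tokens, collapsing the intervening whitespace. A minimal sketch of this behavior:

>>> from sqlglot.tokens import Tokenizer, TokenType
>>> Tokenizer().tokenize("GROUP BY x")[0].token_type
<TokenType.GROUP_BY: 'GROUP_BY'>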
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>}
BIT_STRINGS: List[Union[str, Tuple[str, str]]] = []
BYTE_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEX_STRINGS: List[Union[str, Tuple[str, str]]] = []
RAW_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEREDOC_STRINGS: List[Union[str, Tuple[str, str]]] = []
IDENTIFIERS: List[Union[str, Tuple[str, str]]] = ['"']
IDENTIFIER_ESCAPES = ['"']
QUOTES: List[Union[str, Tuple[str, str]]] = ["'"]
STRING_ESCAPES = ["'"]
VAR_SINGLE_TOKENS: Set[str] = set()
ESCAPE_SEQUENCES: Dict[str, str] = {}
IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
KEYWORDS: Dict[str, TokenType] = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 
'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 
'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 
'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
WHITE_SPACE: Dict[Optional[str], TokenType] = {' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>, '\r\n': <TokenType.BREAK: 'BREAK'>}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.SHOW: 'SHOW'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>}
COMMAND_PREFIX_TOKENS = {<TokenType.BEGIN: 'BEGIN'>, <TokenType.SEMICOLON: 'SEMICOLON'>}
NUMERIC_LITERALS: Dict[str, str] = {}
ENCODE: Optional[str] = None
COMMENTS = ['--', ('/*', '*/')]
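Comments are never emitted as standalone tokens: _scan_comment attaches them to the comments list of a neighboring token, the preceding token for a trailing comment on the same line, the succeeding token otherwise. A small sketch, assuming the default COMMENTS delimiters above:

>>> from sqlglot.tokens import Tokenizer
>>> Tokenizer().tokenize("SELECT 1 -- answer")[-1].comments
[' answer']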
def reset(self) -> None:
818    def reset(self) -> None:
819        self.sql = ""
820        self.size = 0
821        self.tokens: t.List[Token] = []
822        self._start = 0
823        self._current = 0
824        self._line = 1
825        self._col = 0
826        self._comments: t.List[str] = []
827
828        self._char = ""
829        self._end = False
830        self._peek = ""
831        self._prev_token_line = -1
def tokenize(self, sql: str) -> List[Token]:
833    def tokenize(self, sql: str) -> t.List[Token]:
834        """Returns a list of tokens corresponding to the SQL string `sql`."""
835        self.reset()
836        self.sql = sql
837        self.size = len(sql)
838
839        try:
840            self._scan()
841        except Exception as e:
842            start = max(self._current - 50, 0)
843            end = min(self._current + 50, self.size - 1)
844            context = self.sql[start:end]
845            raise TokenError(f"Error tokenizing '{context}'") from e
846
847        return self.tokens

Returns a list of tokens corresponding to the SQL string sql.
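
Typical usage, sketched with the default Tokenizer:

>>> from sqlglot.tokens import Tokenizer
>>> [t.token_type for t in Tokenizer().tokenize("SELECT 1")]
[<TokenType.SELECT: 'SELECT'>, <TokenType.NUMBER: 'NUMBER'>]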

def peek(self, i: int = 0) -> str:
915    def peek(self, i: int = 0) -> str:
916        i = self._current + i
917        if i < self.size:
918            return self.sql[i]
919        return ""
size
sql
tokens
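
Dialect tokenizers customize behavior by subclassing Tokenizer and overriding class attributes such as QUOTES, IDENTIFIERS, and KEYWORDS; the _Tokenizer metaclass then rebuilds the derived lookup tables (_QUOTES, _IDENTIFIERS, _KEYWORD_TRIE, and so on) for the subclass. A hypothetical sketch (MyTokenizer is illustrative, not a dialect shipped with sqlglot):

>>> from sqlglot.tokens import Tokenizer, TokenType
>>> class MyTokenizer(Tokenizer):
...     QUOTES = ["'", '"']   # treat double quotes as string delimiters
...     IDENTIFIERS = ["`"]   # backticks delimit identifiers instead
...
>>> MyTokenizer().tokenize('SELECT "x"')[1].token_type
<TokenType.STRING: 'STRING'>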