sqlglot.tokens
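
This module implements sqlglot's tokenizer. `Tokenizer.tokenize` scans a SQL
string into a flat list of `Token` objects, which the parser then consumes.
As a minimal sketch of the API defined below:

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT 1")
    # whitespace produces no tokens, so this yields a SELECT keyword and a NUMBER
    assert [t.token_type for t in tokens] == [TokenType.SELECT, TokenType.NUMBER]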

   1from __future__ import annotations
   2
   3import typing as t
   4from enum import auto
   5
   6from sqlglot.helper import AutoName
   7from sqlglot.trie import in_trie, new_trie
   8
   9
  10class TokenType(AutoName):
  11    L_PAREN = auto()
  12    R_PAREN = auto()
  13    L_BRACKET = auto()
  14    R_BRACKET = auto()
  15    L_BRACE = auto()
  16    R_BRACE = auto()
  17    COMMA = auto()
  18    DOT = auto()
  19    DASH = auto()
  20    PLUS = auto()
  21    COLON = auto()
  22    DCOLON = auto()
  23    SEMICOLON = auto()
  24    STAR = auto()
  25    BACKSLASH = auto()
  26    SLASH = auto()
  27    LT = auto()
  28    LTE = auto()
  29    GT = auto()
  30    GTE = auto()
  31    NOT = auto()
  32    EQ = auto()
  33    NEQ = auto()
  34    NULLSAFE_EQ = auto()
  35    AND = auto()
  36    OR = auto()
  37    AMP = auto()
  38    DPIPE = auto()
  39    PIPE = auto()
  40    CARET = auto()
  41    TILDA = auto()
  42    ARROW = auto()
  43    DARROW = auto()
  44    FARROW = auto()
  45    HASH = auto()
  46    HASH_ARROW = auto()
  47    DHASH_ARROW = auto()
  48    LR_ARROW = auto()
  49    LT_AT = auto()
  50    AT_GT = auto()
  51    DOLLAR = auto()
  52    PARAMETER = auto()
  53    SESSION_PARAMETER = auto()
  54    NATIONAL = auto()
  55    DAMP = auto()
  56
  57    BLOCK_START = auto()
  58    BLOCK_END = auto()
  59
  60    SPACE = auto()
  61    BREAK = auto()
  62
  63    STRING = auto()
  64    NUMBER = auto()
  65    IDENTIFIER = auto()
  66    DATABASE = auto()
  67    COLUMN = auto()
  68    COLUMN_DEF = auto()
  69    SCHEMA = auto()
  70    TABLE = auto()
  71    VAR = auto()
  72    BIT_STRING = auto()
  73    HEX_STRING = auto()
  74    BYTE_STRING = auto()
  75
  76    # types
  77    BIT = auto()
  78    BOOLEAN = auto()
  79    TINYINT = auto()
  80    UTINYINT = auto()
  81    SMALLINT = auto()
  82    USMALLINT = auto()
  83    INT = auto()
  84    UINT = auto()
  85    BIGINT = auto()
  86    UBIGINT = auto()
  87    FLOAT = auto()
  88    DOUBLE = auto()
  89    DECIMAL = auto()
  90    CHAR = auto()
  91    NCHAR = auto()
  92    VARCHAR = auto()
  93    NVARCHAR = auto()
  94    TEXT = auto()
  95    MEDIUMTEXT = auto()
  96    LONGTEXT = auto()
  97    MEDIUMBLOB = auto()
  98    LONGBLOB = auto()
  99    BINARY = auto()
 100    VARBINARY = auto()
 101    JSON = auto()
 102    JSONB = auto()
 103    TIME = auto()
 104    TIMESTAMP = auto()
 105    TIMESTAMPTZ = auto()
 106    TIMESTAMPLTZ = auto()
 107    DATETIME = auto()
 108    DATE = auto()
 109    UUID = auto()
 110    GEOGRAPHY = auto()
 111    NULLABLE = auto()
 112    GEOMETRY = auto()
 113    HLLSKETCH = auto()
 114    HSTORE = auto()
 115    SUPER = auto()
 116    SERIAL = auto()
 117    SMALLSERIAL = auto()
 118    BIGSERIAL = auto()
 119    XML = auto()
 120    UNIQUEIDENTIFIER = auto()
 121    MONEY = auto()
 122    SMALLMONEY = auto()
 123    ROWVERSION = auto()
 124    IMAGE = auto()
 125    VARIANT = auto()
 126    OBJECT = auto()
 127    INET = auto()
 128
 129    # keywords
 130    ALIAS = auto()
 131    ALTER = auto()
 132    ALWAYS = auto()
 133    ALL = auto()
 134    ANTI = auto()
 135    ANY = auto()
 136    APPLY = auto()
 137    ARRAY = auto()
 138    ASC = auto()
 139    ASOF = auto()
 140    AT_TIME_ZONE = auto()
 141    AUTO_INCREMENT = auto()
 142    BEGIN = auto()
 143    BETWEEN = auto()
 144    BOTH = auto()
 145    BUCKET = auto()
 146    BY_DEFAULT = auto()
 147    CACHE = auto()
 148    CASCADE = auto()
 149    CASE = auto()
 150    CHARACTER_SET = auto()
 151    CLUSTER_BY = auto()
 152    COLLATE = auto()
 153    COMMAND = auto()
 154    COMMENT = auto()
 155    COMMIT = auto()
 156    COMPOUND = auto()
 157    CONSTRAINT = auto()
 158    CREATE = auto()
 159    CROSS = auto()
 160    CUBE = auto()
 161    CURRENT_DATE = auto()
 162    CURRENT_DATETIME = auto()
 163    CURRENT_ROW = auto()
 164    CURRENT_TIME = auto()
 165    CURRENT_TIMESTAMP = auto()
 166    DEFAULT = auto()
 167    DELETE = auto()
 168    DESC = auto()
 169    DESCRIBE = auto()
 170    DISTINCT = auto()
 171    DISTINCT_FROM = auto()
 172    DISTRIBUTE_BY = auto()
 173    DIV = auto()
 174    DROP = auto()
 175    ELSE = auto()
 176    END = auto()
 177    ESCAPE = auto()
 178    EXCEPT = auto()
 179    EXECUTE = auto()
 180    EXISTS = auto()
 181    FALSE = auto()
 182    FETCH = auto()
 183    FILTER = auto()
 184    FINAL = auto()
 185    FIRST = auto()
 186    FOLLOWING = auto()
 187    FOR = auto()
 188    FOREIGN_KEY = auto()
 189    FORMAT = auto()
 190    FROM = auto()
 191    FULL = auto()
 192    FUNCTION = auto()
 193    GLOB = auto()
 194    GLOBAL = auto()
 195    GROUP_BY = auto()
 196    GROUPING_SETS = auto()
 197    HAVING = auto()
 198    HINT = auto()
 199    IF = auto()
 200    IGNORE_NULLS = auto()
 201    ILIKE = auto()
 202    ILIKE_ANY = auto()
 203    IN = auto()
 204    INDEX = auto()
 205    INNER = auto()
 206    INSERT = auto()
 207    INTERSECT = auto()
 208    INTERVAL = auto()
 209    INTO = auto()
 210    INTRODUCER = auto()
 211    IRLIKE = auto()
 212    IS = auto()
 213    ISNULL = auto()
 214    JOIN = auto()
 215    JOIN_MARKER = auto()
 216    LANGUAGE = auto()
 217    LATERAL = auto()
 218    LAZY = auto()
 219    LEADING = auto()
 220    LEFT = auto()
 221    LIKE = auto()
 222    LIKE_ANY = auto()
 223    LIMIT = auto()
 224    LOAD_DATA = auto()
 225    LOCAL = auto()
 226    MAP = auto()
 227    MATCH_RECOGNIZE = auto()
 228    MATERIALIZED = auto()
 229    MERGE = auto()
 230    MOD = auto()
 231    NATURAL = auto()
 232    NEXT = auto()
 233    NO_ACTION = auto()
 234    NOTNULL = auto()
 235    NULL = auto()
 236    NULLS_FIRST = auto()
 237    NULLS_LAST = auto()
 238    OFFSET = auto()
 239    ON = auto()
 240    ONLY = auto()
 241    OPTIONS = auto()
 242    ORDER_BY = auto()
 243    ORDERED = auto()
 244    ORDINALITY = auto()
 245    OUTER = auto()
 246    OUT_OF = auto()
 247    OVER = auto()
 248    OVERLAPS = auto()
 249    OVERWRITE = auto()
 250    PARTITION = auto()
 251    PARTITION_BY = auto()
 252    PERCENT = auto()
 253    PIVOT = auto()
 254    PLACEHOLDER = auto()
 255    PRAGMA = auto()
 256    PRECEDING = auto()
 257    PRIMARY_KEY = auto()
 258    PROCEDURE = auto()
 259    PROPERTIES = auto()
 260    PSEUDO_TYPE = auto()
 261    QUALIFY = auto()
 262    QUOTE = auto()
 263    RANGE = auto()
 264    RECURSIVE = auto()
 265    REPLACE = auto()
 266    RESPECT_NULLS = auto()
 267    RETURNING = auto()
 268    REFERENCES = auto()
 269    RIGHT = auto()
 270    RLIKE = auto()
 271    ROLLBACK = auto()
 272    ROLLUP = auto()
 273    ROW = auto()
 274    ROWS = auto()
 275    SEED = auto()
 276    SELECT = auto()
 277    SEMI = auto()
 278    SEPARATOR = auto()
 279    SERDE_PROPERTIES = auto()
 280    SET = auto()
 281    SHOW = auto()
 282    SIMILAR_TO = auto()
 283    SOME = auto()
 284    SORTKEY = auto()
 285    SORT_BY = auto()
 286    STRUCT = auto()
 287    TABLE_SAMPLE = auto()
 288    TEMPORARY = auto()
 289    TOP = auto()
 290    THEN = auto()
 291    TRAILING = auto()
 292    TRUE = auto()
 293    UNBOUNDED = auto()
 294    UNCACHE = auto()
 295    UNION = auto()
 296    UNLOGGED = auto()
 297    UNNEST = auto()
 298    UNPIVOT = auto()
 299    UPDATE = auto()
 300    USE = auto()
 301    USING = auto()
 302    VALUES = auto()
 303    VIEW = auto()
 304    VOLATILE = auto()
 305    WHEN = auto()
 306    WHERE = auto()
 307    WINDOW = auto()
 308    WITH = auto()
 309    WITH_TIME_ZONE = auto()
 310    WITH_LOCAL_TIME_ZONE = auto()
 311    WITHIN_GROUP = auto()
 312    WITHOUT_TIME_ZONE = auto()
 313    UNIQUE = auto()
 314
 315
 316class Token:
 317    __slots__ = ("token_type", "text", "line", "col", "comments")
 318
 319    @classmethod
 320    def number(cls, number: int) -> Token:
 321        """Returns a NUMBER token with `number` as its text."""
 322        return cls(TokenType.NUMBER, str(number))
 323
 324    @classmethod
 325    def string(cls, string: str) -> Token:
 326        """Returns a STRING token with `string` as its text."""
 327        return cls(TokenType.STRING, string)
 328
 329    @classmethod
 330    def identifier(cls, identifier: str) -> Token:
 331        """Returns an IDENTIFIER token with `identifier` as its text."""
 332        return cls(TokenType.IDENTIFIER, identifier)
 333
 334    @classmethod
 335    def var(cls, var: str) -> Token:
 336        """Returns a VAR token with `var` as its text."""
 337        return cls(TokenType.VAR, var)
 338
 339    def __init__(
 340        self,
 341        token_type: TokenType,
 342        text: str,
 343        line: int = 1,
 344        col: int = 1,
 345        comments: t.Optional[t.List[str]] = None,
 346    ) -> None:
 347        self.token_type = token_type
 348        self.text = text
 349        self.line = line
 350        # col is the 1-based column of the token's first character
 351        self.col = max(1, col - len(text))
 352        self.comments = comments if comments is not None else []
 353
 354    def __repr__(self) -> str:
 355        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 356        return f"<Token {attributes}>"
 357
 358
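The classmethod constructors above make it easy to build tokens by hand, e.g.
in tests or when injecting synthetic tokens. A small illustrative sketch:

    from sqlglot.tokens import Token, TokenType

    tok = Token.number(42)
    assert tok.token_type is TokenType.NUMBER and tok.text == "42"
    assert Token.identifier("foo").token_type is TokenType.IDENTIFIER
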
 359class _Tokenizer(type):
 360    def __new__(cls, clsname, bases, attrs):  # type: ignore
 361        klass = super().__new__(cls, clsname, bases, attrs)
 362
 363        klass._QUOTES = {
 364            f"{prefix}{s}": e
 365            for s, e in cls._delimiter_list_to_dict(klass.QUOTES).items()
 366            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
 367        }
 368        klass._BIT_STRINGS = cls._delimiter_list_to_dict(klass.BIT_STRINGS)
 369        klass._HEX_STRINGS = cls._delimiter_list_to_dict(klass.HEX_STRINGS)
 370        klass._BYTE_STRINGS = cls._delimiter_list_to_dict(klass.BYTE_STRINGS)
 371        klass._IDENTIFIERS = cls._delimiter_list_to_dict(klass.IDENTIFIERS)
 372        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 373        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 374        klass._COMMENTS = dict(
 375            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 376            for comment in klass.COMMENTS
 377        )
 378
 379        klass.KEYWORD_TRIE = new_trie(
 380            key.upper()
 381            for key in {
 382                **klass.KEYWORDS,
 383                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
 384                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
 385                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
 386                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
 387                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
 388            }
 389            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 390        )
 391
 392        return klass
 393
 394    @staticmethod
 395    def _delimiter_list_to_dict(items: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 396        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in items)
 397
 398
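The metaclass above lets each dialect declare its delimiters as simple class
attributes: at class-creation time it derives the private lookup dicts and the
keyword trie. A hypothetical subclass, for illustration only:

    from sqlglot.tokens import Tokenizer

    class MyTokenizer(Tokenizer):
        QUOTES = ["'", ("$$", "$$")]

    # Quotes that don't start with a letter also get n/N prefixed variants
    # for national string literals, so _QUOTES comes out roughly as:
    # {"'": "'", "n'": "'", "N'": "'", "$$": "$$", "n$$": "$$", "N$$": "$$"}
    print(MyTokenizer._QUOTES)
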
 399class Tokenizer(metaclass=_Tokenizer):
 400    SINGLE_TOKENS = {
 401        "(": TokenType.L_PAREN,
 402        ")": TokenType.R_PAREN,
 403        "[": TokenType.L_BRACKET,
 404        "]": TokenType.R_BRACKET,
 405        "{": TokenType.L_BRACE,
 406        "}": TokenType.R_BRACE,
 407        "&": TokenType.AMP,
 408        "^": TokenType.CARET,
 409        ":": TokenType.COLON,
 410        ",": TokenType.COMMA,
 411        ".": TokenType.DOT,
 412        "-": TokenType.DASH,
 413        "=": TokenType.EQ,
 414        ">": TokenType.GT,
 415        "<": TokenType.LT,
 416        "%": TokenType.MOD,
 417        "!": TokenType.NOT,
 418        "|": TokenType.PIPE,
 419        "+": TokenType.PLUS,
 420        ";": TokenType.SEMICOLON,
 421        "/": TokenType.SLASH,
 422        "\\": TokenType.BACKSLASH,
 423        "*": TokenType.STAR,
 424        "~": TokenType.TILDA,
 425        "?": TokenType.PLACEHOLDER,
 426        "@": TokenType.PARAMETER,
 427        # used only to break up a var like x'y';
 428        # the token type assigned here doesn't matter
 429        "'": TokenType.QUOTE,
 430        "`": TokenType.IDENTIFIER,
 431        '"': TokenType.IDENTIFIER,
 432        "#": TokenType.HASH,
 433    }
 434
 435    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 436
 437    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 438
 439    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 440
 441    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 442
 443    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 444
 445    STRING_ESCAPES = ["'"]
 446
 447    _STRING_ESCAPES: t.Set[str] = set()
 448
 449    IDENTIFIER_ESCAPES = ['"']
 450
 451    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 452
 453    KEYWORDS = {
 454        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 455        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 456        "{{+": TokenType.BLOCK_START,
 457        "{{-": TokenType.BLOCK_START,
 458        "+}}": TokenType.BLOCK_END,
 459        "-}}": TokenType.BLOCK_END,
 460        "/*+": TokenType.HINT,
 461        "==": TokenType.EQ,
 462        "::": TokenType.DCOLON,
 463        "||": TokenType.DPIPE,
 464        ">=": TokenType.GTE,
 465        "<=": TokenType.LTE,
 466        "<>": TokenType.NEQ,
 467        "!=": TokenType.NEQ,
 468        "<=>": TokenType.NULLSAFE_EQ,
 469        "->": TokenType.ARROW,
 470        "->>": TokenType.DARROW,
 471        "=>": TokenType.FARROW,
 472        "#>": TokenType.HASH_ARROW,
 473        "#>>": TokenType.DHASH_ARROW,
 474        "<->": TokenType.LR_ARROW,
 475        "&&": TokenType.DAMP,
 476        "ALL": TokenType.ALL,
 477        "ALWAYS": TokenType.ALWAYS,
 478        "AND": TokenType.AND,
 479        "ANTI": TokenType.ANTI,
 480        "ANY": TokenType.ANY,
 481        "ASC": TokenType.ASC,
 482        "AS": TokenType.ALIAS,
 483        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
 484        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 485        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 486        "BEGIN": TokenType.BEGIN,
 487        "BETWEEN": TokenType.BETWEEN,
 488        "BOTH": TokenType.BOTH,
 489        "BUCKET": TokenType.BUCKET,
 490        "BY DEFAULT": TokenType.BY_DEFAULT,
 491        "CACHE": TokenType.CACHE,
 492        "UNCACHE": TokenType.UNCACHE,
 493        "CASE": TokenType.CASE,
 494        "CASCADE": TokenType.CASCADE,
 495        "CHARACTER SET": TokenType.CHARACTER_SET,
 496        "CLUSTER BY": TokenType.CLUSTER_BY,
 497        "COLLATE": TokenType.COLLATE,
 498        "COLUMN": TokenType.COLUMN,
 499        "COMMIT": TokenType.COMMIT,
 500        "COMPOUND": TokenType.COMPOUND,
 501        "CONSTRAINT": TokenType.CONSTRAINT,
 502        "CREATE": TokenType.CREATE,
 503        "CROSS": TokenType.CROSS,
 504        "CUBE": TokenType.CUBE,
 505        "CURRENT_DATE": TokenType.CURRENT_DATE,
 506        "CURRENT ROW": TokenType.CURRENT_ROW,
 507        "CURRENT_TIME": TokenType.CURRENT_TIME,
 508        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 509        "DATABASE": TokenType.DATABASE,
 510        "DEFAULT": TokenType.DEFAULT,
 511        "DELETE": TokenType.DELETE,
 512        "DESC": TokenType.DESC,
 513        "DESCRIBE": TokenType.DESCRIBE,
 514        "DISTINCT": TokenType.DISTINCT,
 515        "DISTINCT FROM": TokenType.DISTINCT_FROM,
 516        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 517        "DIV": TokenType.DIV,
 518        "DROP": TokenType.DROP,
 519        "ELSE": TokenType.ELSE,
 520        "END": TokenType.END,
 521        "ESCAPE": TokenType.ESCAPE,
 522        "EXCEPT": TokenType.EXCEPT,
 523        "EXECUTE": TokenType.EXECUTE,
 524        "EXISTS": TokenType.EXISTS,
 525        "FALSE": TokenType.FALSE,
 526        "FETCH": TokenType.FETCH,
 527        "FILTER": TokenType.FILTER,
 528        "FIRST": TokenType.FIRST,
 529        "FULL": TokenType.FULL,
 530        "FUNCTION": TokenType.FUNCTION,
 531        "FOLLOWING": TokenType.FOLLOWING,
 532        "FOR": TokenType.FOR,
 533        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 534        "FORMAT": TokenType.FORMAT,
 535        "FROM": TokenType.FROM,
 536        "GLOB": TokenType.GLOB,
 537        "GROUP BY": TokenType.GROUP_BY,
 538        "GROUPING SETS": TokenType.GROUPING_SETS,
 539        "HAVING": TokenType.HAVING,
 540        "IF": TokenType.IF,
 541        "ILIKE": TokenType.ILIKE,
 542        "IGNORE NULLS": TokenType.IGNORE_NULLS,
 543        "IN": TokenType.IN,
 544        "INDEX": TokenType.INDEX,
 545        "INET": TokenType.INET,
 546        "INNER": TokenType.INNER,
 547        "INSERT": TokenType.INSERT,
 548        "INTERVAL": TokenType.INTERVAL,
 549        "INTERSECT": TokenType.INTERSECT,
 550        "INTO": TokenType.INTO,
 551        "IS": TokenType.IS,
 552        "ISNULL": TokenType.ISNULL,
 553        "JOIN": TokenType.JOIN,
 554        "LATERAL": TokenType.LATERAL,
 555        "LAZY": TokenType.LAZY,
 556        "LEADING": TokenType.LEADING,
 557        "LEFT": TokenType.LEFT,
 558        "LIKE": TokenType.LIKE,
 559        "LIMIT": TokenType.LIMIT,
 560        "LOAD DATA": TokenType.LOAD_DATA,
 561        "LOCAL": TokenType.LOCAL,
 562        "MATERIALIZED": TokenType.MATERIALIZED,
 563        "MERGE": TokenType.MERGE,
 564        "NATURAL": TokenType.NATURAL,
 565        "NEXT": TokenType.NEXT,
 566        "NO ACTION": TokenType.NO_ACTION,
 567        "NOT": TokenType.NOT,
 568        "NOTNULL": TokenType.NOTNULL,
 569        "NULL": TokenType.NULL,
 570        "NULLS FIRST": TokenType.NULLS_FIRST,
 571        "NULLS LAST": TokenType.NULLS_LAST,
 572        "OBJECT": TokenType.OBJECT,
 573        "OFFSET": TokenType.OFFSET,
 574        "ON": TokenType.ON,
 575        "ONLY": TokenType.ONLY,
 576        "OPTIONS": TokenType.OPTIONS,
 577        "OR": TokenType.OR,
 578        "ORDER BY": TokenType.ORDER_BY,
 579        "ORDINALITY": TokenType.ORDINALITY,
 580        "OUTER": TokenType.OUTER,
 581        "OUT OF": TokenType.OUT_OF,
 582        "OVER": TokenType.OVER,
 583        "OVERLAPS": TokenType.OVERLAPS,
 584        "OVERWRITE": TokenType.OVERWRITE,
 585        "PARTITION": TokenType.PARTITION,
 586        "PARTITION BY": TokenType.PARTITION_BY,
 587        "PARTITIONED BY": TokenType.PARTITION_BY,
 588        "PARTITIONED_BY": TokenType.PARTITION_BY,
 589        "PERCENT": TokenType.PERCENT,
 590        "PIVOT": TokenType.PIVOT,
 591        "PRAGMA": TokenType.PRAGMA,
 592        "PRECEDING": TokenType.PRECEDING,
 593        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 594        "PROCEDURE": TokenType.PROCEDURE,
 595        "QUALIFY": TokenType.QUALIFY,
 596        "RANGE": TokenType.RANGE,
 597        "RECURSIVE": TokenType.RECURSIVE,
 598        "REGEXP": TokenType.RLIKE,
 599        "REPLACE": TokenType.REPLACE,
 600        "RESPECT NULLS": TokenType.RESPECT_NULLS,
 601        "REFERENCES": TokenType.REFERENCES,
 602        "RIGHT": TokenType.RIGHT,
 603        "RLIKE": TokenType.RLIKE,
 604        "ROLLBACK": TokenType.ROLLBACK,
 605        "ROLLUP": TokenType.ROLLUP,
 606        "ROW": TokenType.ROW,
 607        "ROWS": TokenType.ROWS,
 608        "SCHEMA": TokenType.SCHEMA,
 609        "SEED": TokenType.SEED,
 610        "SELECT": TokenType.SELECT,
 611        "SEMI": TokenType.SEMI,
 612        "SET": TokenType.SET,
 613        "SHOW": TokenType.SHOW,
 614        "SIMILAR TO": TokenType.SIMILAR_TO,
 615        "SOME": TokenType.SOME,
 616        "SORTKEY": TokenType.SORTKEY,
 617        "SORT BY": TokenType.SORT_BY,
 618        "TABLE": TokenType.TABLE,
 619        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 620        "TEMP": TokenType.TEMPORARY,
 621        "TEMPORARY": TokenType.TEMPORARY,
 622        "THEN": TokenType.THEN,
 623        "TRUE": TokenType.TRUE,
 624        "TRAILING": TokenType.TRAILING,
 625        "UNBOUNDED": TokenType.UNBOUNDED,
 626        "UNION": TokenType.UNION,
 627        "UNLOGGED": TokenType.UNLOGGED,
 628        "UNNEST": TokenType.UNNEST,
 629        "UNPIVOT": TokenType.UNPIVOT,
 630        "UPDATE": TokenType.UPDATE,
 631        "USE": TokenType.USE,
 632        "USING": TokenType.USING,
 633        "VALUES": TokenType.VALUES,
 634        "VIEW": TokenType.VIEW,
 635        "VOLATILE": TokenType.VOLATILE,
 636        "WHEN": TokenType.WHEN,
 637        "WHERE": TokenType.WHERE,
 638        "WINDOW": TokenType.WINDOW,
 639        "WITH": TokenType.WITH,
 640        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
 641        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
 642        "WITHIN GROUP": TokenType.WITHIN_GROUP,
 643        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
 644        "APPLY": TokenType.APPLY,
 645        "ARRAY": TokenType.ARRAY,
 646        "BIT": TokenType.BIT,
 647        "BOOL": TokenType.BOOLEAN,
 648        "BOOLEAN": TokenType.BOOLEAN,
 649        "BYTE": TokenType.TINYINT,
 650        "TINYINT": TokenType.TINYINT,
 651        "SHORT": TokenType.SMALLINT,
 652        "SMALLINT": TokenType.SMALLINT,
 653        "INT2": TokenType.SMALLINT,
 654        "INTEGER": TokenType.INT,
 655        "INT": TokenType.INT,
 656        "INT4": TokenType.INT,
 657        "LONG": TokenType.BIGINT,
 658        "BIGINT": TokenType.BIGINT,
 659        "INT8": TokenType.BIGINT,
 660        "DEC": TokenType.DECIMAL,
 661        "DECIMAL": TokenType.DECIMAL,
 662        "MAP": TokenType.MAP,
 663        "NULLABLE": TokenType.NULLABLE,
 664        "NUMBER": TokenType.DECIMAL,
 665        "NUMERIC": TokenType.DECIMAL,
 666        "FIXED": TokenType.DECIMAL,
 667        "REAL": TokenType.FLOAT,
 668        "FLOAT": TokenType.FLOAT,
 669        "FLOAT4": TokenType.FLOAT,
 670        "FLOAT8": TokenType.DOUBLE,
 671        "DOUBLE": TokenType.DOUBLE,
 672        "DOUBLE PRECISION": TokenType.DOUBLE,
 673        "JSON": TokenType.JSON,
 674        "CHAR": TokenType.CHAR,
 675        "CHARACTER": TokenType.CHAR,
 676        "NCHAR": TokenType.NCHAR,
 677        "VARCHAR": TokenType.VARCHAR,
 678        "VARCHAR2": TokenType.VARCHAR,
 679        "NVARCHAR": TokenType.NVARCHAR,
 680        "NVARCHAR2": TokenType.NVARCHAR,
 681        "STR": TokenType.TEXT,
 682        "STRING": TokenType.TEXT,
 683        "TEXT": TokenType.TEXT,
 684        "CLOB": TokenType.TEXT,
 685        "LONGVARCHAR": TokenType.TEXT,
 686        "BINARY": TokenType.BINARY,
 687        "BLOB": TokenType.VARBINARY,
 688        "BYTEA": TokenType.VARBINARY,
 689        "VARBINARY": TokenType.VARBINARY,
 690        "TIME": TokenType.TIME,
 691        "TIMESTAMP": TokenType.TIMESTAMP,
 692        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 693        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 694        "DATE": TokenType.DATE,
 695        "DATETIME": TokenType.DATETIME,
 696        "UNIQUE": TokenType.UNIQUE,
 697        "STRUCT": TokenType.STRUCT,
 698        "VARIANT": TokenType.VARIANT,
 699        "ALTER": TokenType.ALTER,
 700        "ALTER AGGREGATE": TokenType.COMMAND,
 701        "ALTER DEFAULT": TokenType.COMMAND,
 702        "ALTER DOMAIN": TokenType.COMMAND,
 703        "ALTER ROLE": TokenType.COMMAND,
 704        "ALTER RULE": TokenType.COMMAND,
 705        "ALTER SEQUENCE": TokenType.COMMAND,
 706        "ALTER TYPE": TokenType.COMMAND,
 707        "ALTER USER": TokenType.COMMAND,
 708        "ALTER VIEW": TokenType.COMMAND,
 709        "ANALYZE": TokenType.COMMAND,
 710        "CALL": TokenType.COMMAND,
 711        "COMMENT": TokenType.COMMENT,
 712        "COPY": TokenType.COMMAND,
 713        "EXPLAIN": TokenType.COMMAND,
 714        "GRANT": TokenType.COMMAND,
 715        "OPTIMIZE": TokenType.COMMAND,
 716        "PREPARE": TokenType.COMMAND,
 717        "TRUNCATE": TokenType.COMMAND,
 718        "VACUUM": TokenType.COMMAND,
 719    }
 720
 721    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 722        " ": TokenType.SPACE,
 723        "\t": TokenType.SPACE,
 724        "\n": TokenType.BREAK,
 725        "\r": TokenType.BREAK,
 726        "\r\n": TokenType.BREAK,
 727    }
 728
 729    COMMANDS = {
 730        TokenType.COMMAND,
 731        TokenType.EXECUTE,
 732        TokenType.FETCH,
 733        TokenType.SHOW,
 734    }
 735
 736    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 737
 738    # handles numeric literal suffixes like Hive's (e.g. 3L = BIGINT)
 739    NUMERIC_LITERALS: t.Dict[str, str] = {}
 740    ENCODE: t.Optional[str] = None
 741
 742    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
 743    KEYWORD_TRIE = None  # filled in by the _Tokenizer metaclass
 744
 745    IDENTIFIER_CAN_START_WITH_DIGIT = False
 746
 747    __slots__ = (
 748        "sql",
 749        "size",
 750        "tokens",
 751        "_start",
 752        "_current",
 753        "_line",
 754        "_col",
 755        "_comments",
 756        "_char",
 757        "_end",
 758        "_peek",
 759        "_prev_token_line",
 760        "_prev_token_comments",
 761        "_prev_token_type",
 762    )
 763
 764    def __init__(self) -> None:
 765        self.reset()
 766
 767    def reset(self) -> None:
 768        self.sql = ""
 769        self.size = 0
 770        self.tokens: t.List[Token] = []
 771        self._start = 0
 772        self._current = 0
 773        self._line = 1
 774        self._col = 1
 775        self._comments: t.List[str] = []
 776
 777        self._char = None
 778        self._end = None
 779        self._peek = None
 780        self._prev_token_line = -1
 781        self._prev_token_comments: t.List[str] = []
 782        self._prev_token_type = None
 783
 784    def tokenize(self, sql: str) -> t.List[Token]:
 785        """Returns a list of tokens corresponding to the SQL string `sql`."""
 786        self.reset()
 787        self.sql = sql
 788        self.size = len(sql)
 789        self._scan()
 790        return self.tokens
 791
 792    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 793        while self.size and not self._end:
 794            self._start = self._current
 795            self._advance()
 796
 797            if self._char is None:
 798                break
 799
 800            if self._char not in self.WHITE_SPACE:
 801                if self._char.isdigit():
 802                    self._scan_number()
 803                elif self._char in self._IDENTIFIERS:
 804                    self._scan_identifier(self._IDENTIFIERS[self._char])
 805                else:
 806                    self._scan_keywords()
 807
 808            if until and until():
 809                break
 810
 811    def _chars(self, size: int) -> str:
 812        if size == 1:
 813            return self._char  # type: ignore
 814        start = self._current - 1
 815        end = start + size
 816        if end <= self.size:
 817            return self.sql[start:end]
 818        return ""
 819
 820    def _advance(self, i: int = 1) -> None:
 821        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 822            self._set_new_line()
 823
 824        self._col += i
 825        self._current += i
 826        self._end = self._current >= self.size  # type: ignore
 827        self._char = self.sql[self._current - 1]  # type: ignore
 828        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore
 829
 830    def _set_new_line(self) -> None:
 831        self._col = 1
 832        self._line += 1
 833
 834    @property
 835    def _text(self) -> str:
 836        return self.sql[self._start : self._current]
 837
 838    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 839        self._prev_token_line = self._line
 840        self._prev_token_comments = self._comments
 841        self._prev_token_type = token_type  # type: ignore
 842        self.tokens.append(
 843            Token(
 844                token_type,
 845                self._text if text is None else text,
 846                self._line,
 847                self._col,
 848                self._comments,
 849            )
 850        )
 851        self._comments = []
 852
 853        # If the command token starts a statement (i.e. it's the first token or it follows
 854        # a semicolon / BEGIN token), everything up to the next semicolon is kept as a string
 855        if (
 856            token_type in self.COMMANDS
 857            and self._peek != ";"
 858            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 859        ):
 860            start = self._current
 861            tokens = len(self.tokens)
 862            self._scan(lambda: self._peek == ";")
 863            self.tokens = self.tokens[:tokens]
 864            text = self.sql[start : self._current].strip()
 865            if text:
 866                self._add(TokenType.STRING, text)
 867
 868    def _scan_keywords(self) -> None:
 869        size = 0
 870        word = None
 871        chars = self._text
 872        char = chars
 873        prev_space = False
 874        skip = False
 875        trie = self.KEYWORD_TRIE
 876        single_token = char in self.SINGLE_TOKENS
 877
 878        while chars:
 879            if skip:
 880                result = 1
 881            else:
 882                result, trie = in_trie(trie, char.upper())  # type: ignore
 883
 884            if result == 0:
 885                break
 886            if result == 2:
 887                word = chars
 888            size += 1
 889            end = self._current - 1 + size
 890
 891            if end < self.size:
 892                char = self.sql[end]
 893                single_token = single_token or char in self.SINGLE_TOKENS
 894                is_space = char in self.WHITE_SPACE
 895
 896                if not is_space or not prev_space:
 897                    if is_space:
 898                        char = " "
 899                    chars += char
 900                    prev_space = is_space
 901                    skip = False
 902                else:
 903                    skip = True
 904            else:
 905                chars = " "
 906
 907        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 908
 909        if not word:
 910            if self._char in self.SINGLE_TOKENS:
 911                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
 912                return
 913            self._scan_var()
 914            return
 915
 916        if self._scan_string(word):
 917            return
 918        if self._scan_formatted_string(word):
 919            return
 920        if self._scan_comment(word):
 921            return
 922
 923        self._advance(size - 1)
 924        self._add(self.KEYWORDS[word.upper()])
 925
 926    def _scan_comment(self, comment_start: str) -> bool:
 927        if comment_start not in self._COMMENTS:  # type: ignore
 928            return False
 929
 930        comment_start_line = self._line
 931        comment_start_size = len(comment_start)
 932        comment_end = self._COMMENTS[comment_start]  # type: ignore
 933
 934        if comment_end:
 935            comment_end_size = len(comment_end)
 936
 937            while not self._end and self._chars(comment_end_size) != comment_end:
 938                self._advance()
 939
 940            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
 941            self._advance(comment_end_size - 1)
 942        else:
 943            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
 944                self._advance()
 945            self._comments.append(self._text[comment_start_size:])  # type: ignore
 946
 947        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
 948        # Multiple consecutive comments are preserved by appending them to the current comments list.
 949        if comment_start_line == self._prev_token_line:
 950            self.tokens[-1].comments.extend(self._comments)
 951            self._comments = []
 952            self._prev_token_line = self._line
 953
 954        return True
 955
 956    def _scan_number(self) -> None:
 957        if self._char == "0":
 958            peek = self._peek.upper()  # type: ignore
 959            if peek == "B":
 960                return self._scan_bits()
 961            elif peek == "X":
 962                return self._scan_hex()
 963
 964        decimal = False
 965        scientific = 0
 966
 967        while True:
 968            if self._peek.isdigit():  # type: ignore
 969                self._advance()
 970            elif self._peek == "." and not decimal:
 971                decimal = True
 972                self._advance()
 973            elif self._peek in ("-", "+") and scientific == 1:
 974                scientific += 1
 975                self._advance()
 976            elif self._peek.upper() == "E" and not scientific:  # type: ignore
 977                scientific += 1
 978                self._advance()
 979            elif self._peek.isidentifier():  # type: ignore
 980                number_text = self._text
 981                literal = []
 982
 983                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
 984                    literal.append(self._peek.upper())  # type: ignore
 985                    self._advance()
 986
 987                literal = "".join(literal)  # type: ignore
 988                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore
 989
 990                if token_type:
 991                    self._add(TokenType.NUMBER, number_text)
 992                    self._add(TokenType.DCOLON, "::")
 993                    return self._add(token_type, literal)  # type: ignore
 994                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
 995                    return self._add(TokenType.VAR)
 996
 997                self._add(TokenType.NUMBER, number_text)
 998                return self._advance(-len(literal))
 999            else:
1000                return self._add(TokenType.NUMBER)
1001
1002    def _scan_bits(self) -> None:
1003        self._advance()
1004        value = self._extract_value()
1005        try:
1006            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
1007        except ValueError:
1008            self._add(TokenType.IDENTIFIER)
1009
1010    def _scan_hex(self) -> None:
1011        self._advance()
1012        value = self._extract_value()
1013        try:
1014            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
1015        except ValueError:
1016            self._add(TokenType.IDENTIFIER)
1017
1018    def _extract_value(self) -> str:
1019        while True:
1020            char = self._peek.strip()  # type: ignore
1021            if char and char not in self.SINGLE_TOKENS:
1022                self._advance()
1023            else:
1024                break
1025
1026        return self._text
1027
1028    def _scan_string(self, quote: str) -> bool:
1029        quote_end = self._QUOTES.get(quote)  # type: ignore
1030        if quote_end is None:
1031            return False
1032
1033        self._advance(len(quote))
1034        text = self._extract_string(quote_end)
1035        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
1036        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
1037        return True
1038
1039    # X'1234', b'0110', E'\\\\' etc.
1040    def _scan_formatted_string(self, string_start: str) -> bool:
1041        if string_start in self._HEX_STRINGS:  # type: ignore
1042            delimiters = self._HEX_STRINGS  # type: ignore
1043            token_type = TokenType.HEX_STRING
1044            base = 16
1045        elif string_start in self._BIT_STRINGS:  # type: ignore
1046            delimiters = self._BIT_STRINGS  # type: ignore
1047            token_type = TokenType.BIT_STRING
1048            base = 2
1049        elif string_start in self._BYTE_STRINGS:  # type: ignore
1050            delimiters = self._BYTE_STRINGS  # type: ignore
1051            token_type = TokenType.BYTE_STRING
1052            base = None
1053        else:
1054            return False
1055
1056        self._advance(len(string_start))
1057        string_end = delimiters.get(string_start)
1058        text = self._extract_string(string_end)
1059
1060        if base is None:
1061            self._add(token_type, text)
1062        else:
1063            try:
1064                self._add(token_type, f"{int(text, base)}")
1065            except ValueError:
1066                raise RuntimeError(
1067                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1068                )
1069
1070        return True
1071
1072    def _scan_identifier(self, identifier_end: str) -> None:
1073        text = ""
1074        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES
1075
1076        while True:
1077            if self._end:
1078                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")
1079
1080            self._advance()
1081            if self._char == identifier_end:
1082                if identifier_end_is_escape and self._peek == identifier_end:
1083                    text += identifier_end  # type: ignore
1084                    self._advance()
1085                    continue
1086
1087                break
1088
1089            text += self._char  # type: ignore
1090
1091        self._add(TokenType.IDENTIFIER, text)
1092
1093    def _scan_var(self) -> None:
1094        while True:
1095            char = self._peek.strip()  # type: ignore
1096            if char and char not in self.SINGLE_TOKENS:
1097                self._advance()
1098            else:
1099                break
1100        self._add(
1101            TokenType.VAR
1102            if self._prev_token_type == TokenType.PARAMETER
1103            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1104        )
1105
1106    def _extract_string(self, delimiter: str) -> str:
1107        text = ""
1108        delim_size = len(delimiter)
1109
1110        while True:
1111            if self._char in self._STRING_ESCAPES and (
1112                self._peek == delimiter or self._peek in self._STRING_ESCAPES
1113            ):
1114                if self._peek == delimiter:
1115                    text += self._peek  # type: ignore
1116                else:
1117                    text += self._char + self._peek  # type: ignore
1118
1119                if self._current + 1 < self.size:
1120                    self._advance(2)
1121                else:
1122                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1123            else:
1124                if self._chars(delim_size) == delimiter:
1125                    if delim_size > 1:
1126                        self._advance(delim_size - 1)
1127                    break
1128
1129                if self._end:
1130                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1131                text += self._char  # type: ignore
1132                self._advance()
1133
1134        return text
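
Putting it together: the scanner folds multi-word keywords into single tokens
via the keyword trie, skips whitespace, and attaches comments to a neighboring
token. With the default Tokenizer, for example:

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT a /* note */ FROM t ORDER BY a")
    assert [t.token_type for t in tokens] == [
        TokenType.SELECT, TokenType.VAR, TokenType.FROM,
        TokenType.VAR, TokenType.ORDER_BY, TokenType.VAR,
    ]
    # a trailing comment attaches to the last token on its line
    assert tokens[1].comments == [" note "]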
class TokenType(sqlglot.helper.AutoName):
 11class TokenType(AutoName):
 12    L_PAREN = auto()
 13    R_PAREN = auto()
 14    L_BRACKET = auto()
 15    R_BRACKET = auto()
 16    L_BRACE = auto()
 17    R_BRACE = auto()
 18    COMMA = auto()
 19    DOT = auto()
 20    DASH = auto()
 21    PLUS = auto()
 22    COLON = auto()
 23    DCOLON = auto()
 24    SEMICOLON = auto()
 25    STAR = auto()
 26    BACKSLASH = auto()
 27    SLASH = auto()
 28    LT = auto()
 29    LTE = auto()
 30    GT = auto()
 31    GTE = auto()
 32    NOT = auto()
 33    EQ = auto()
 34    NEQ = auto()
 35    NULLSAFE_EQ = auto()
 36    AND = auto()
 37    OR = auto()
 38    AMP = auto()
 39    DPIPE = auto()
 40    PIPE = auto()
 41    CARET = auto()
 42    TILDA = auto()
 43    ARROW = auto()
 44    DARROW = auto()
 45    FARROW = auto()
 46    HASH = auto()
 47    HASH_ARROW = auto()
 48    DHASH_ARROW = auto()
 49    LR_ARROW = auto()
 50    LT_AT = auto()
 51    AT_GT = auto()
 52    DOLLAR = auto()
 53    PARAMETER = auto()
 54    SESSION_PARAMETER = auto()
 55    NATIONAL = auto()
 56    DAMP = auto()
 57
 58    BLOCK_START = auto()
 59    BLOCK_END = auto()
 60
 61    SPACE = auto()
 62    BREAK = auto()
 63
 64    STRING = auto()
 65    NUMBER = auto()
 66    IDENTIFIER = auto()
 67    DATABASE = auto()
 68    COLUMN = auto()
 69    COLUMN_DEF = auto()
 70    SCHEMA = auto()
 71    TABLE = auto()
 72    VAR = auto()
 73    BIT_STRING = auto()
 74    HEX_STRING = auto()
 75    BYTE_STRING = auto()
 76
 77    # types
 78    BIT = auto()
 79    BOOLEAN = auto()
 80    TINYINT = auto()
 81    UTINYINT = auto()
 82    SMALLINT = auto()
 83    USMALLINT = auto()
 84    INT = auto()
 85    UINT = auto()
 86    BIGINT = auto()
 87    UBIGINT = auto()
 88    FLOAT = auto()
 89    DOUBLE = auto()
 90    DECIMAL = auto()
 91    CHAR = auto()
 92    NCHAR = auto()
 93    VARCHAR = auto()
 94    NVARCHAR = auto()
 95    TEXT = auto()
 96    MEDIUMTEXT = auto()
 97    LONGTEXT = auto()
 98    MEDIUMBLOB = auto()
 99    LONGBLOB = auto()
100    BINARY = auto()
101    VARBINARY = auto()
102    JSON = auto()
103    JSONB = auto()
104    TIME = auto()
105    TIMESTAMP = auto()
106    TIMESTAMPTZ = auto()
107    TIMESTAMPLTZ = auto()
108    DATETIME = auto()
109    DATE = auto()
110    UUID = auto()
111    GEOGRAPHY = auto()
112    NULLABLE = auto()
113    GEOMETRY = auto()
114    HLLSKETCH = auto()
115    HSTORE = auto()
116    SUPER = auto()
117    SERIAL = auto()
118    SMALLSERIAL = auto()
119    BIGSERIAL = auto()
120    XML = auto()
121    UNIQUEIDENTIFIER = auto()
122    MONEY = auto()
123    SMALLMONEY = auto()
124    ROWVERSION = auto()
125    IMAGE = auto()
126    VARIANT = auto()
127    OBJECT = auto()
128    INET = auto()
129
130    # keywords
131    ALIAS = auto()
132    ALTER = auto()
133    ALWAYS = auto()
134    ALL = auto()
135    ANTI = auto()
136    ANY = auto()
137    APPLY = auto()
138    ARRAY = auto()
139    ASC = auto()
140    ASOF = auto()
141    AT_TIME_ZONE = auto()
142    AUTO_INCREMENT = auto()
143    BEGIN = auto()
144    BETWEEN = auto()
145    BOTH = auto()
146    BUCKET = auto()
147    BY_DEFAULT = auto()
148    CACHE = auto()
149    CASCADE = auto()
150    CASE = auto()
151    CHARACTER_SET = auto()
152    CLUSTER_BY = auto()
153    COLLATE = auto()
154    COMMAND = auto()
155    COMMENT = auto()
156    COMMIT = auto()
157    COMPOUND = auto()
158    CONSTRAINT = auto()
159    CREATE = auto()
160    CROSS = auto()
161    CUBE = auto()
162    CURRENT_DATE = auto()
163    CURRENT_DATETIME = auto()
164    CURRENT_ROW = auto()
165    CURRENT_TIME = auto()
166    CURRENT_TIMESTAMP = auto()
167    DEFAULT = auto()
168    DELETE = auto()
169    DESC = auto()
170    DESCRIBE = auto()
171    DISTINCT = auto()
172    DISTINCT_FROM = auto()
173    DISTRIBUTE_BY = auto()
174    DIV = auto()
175    DROP = auto()
176    ELSE = auto()
177    END = auto()
178    ESCAPE = auto()
179    EXCEPT = auto()
180    EXECUTE = auto()
181    EXISTS = auto()
182    FALSE = auto()
183    FETCH = auto()
184    FILTER = auto()
185    FINAL = auto()
186    FIRST = auto()
187    FOLLOWING = auto()
188    FOR = auto()
189    FOREIGN_KEY = auto()
190    FORMAT = auto()
191    FROM = auto()
192    FULL = auto()
193    FUNCTION = auto()
194    GLOB = auto()
195    GLOBAL = auto()
196    GROUP_BY = auto()
197    GROUPING_SETS = auto()
198    HAVING = auto()
199    HINT = auto()
200    IF = auto()
201    IGNORE_NULLS = auto()
202    ILIKE = auto()
203    ILIKE_ANY = auto()
204    IN = auto()
205    INDEX = auto()
206    INNER = auto()
207    INSERT = auto()
208    INTERSECT = auto()
209    INTERVAL = auto()
210    INTO = auto()
211    INTRODUCER = auto()
212    IRLIKE = auto()
213    IS = auto()
214    ISNULL = auto()
215    JOIN = auto()
216    JOIN_MARKER = auto()
217    LANGUAGE = auto()
218    LATERAL = auto()
219    LAZY = auto()
220    LEADING = auto()
221    LEFT = auto()
222    LIKE = auto()
223    LIKE_ANY = auto()
224    LIMIT = auto()
225    LOAD_DATA = auto()
226    LOCAL = auto()
227    MAP = auto()
228    MATCH_RECOGNIZE = auto()
229    MATERIALIZED = auto()
230    MERGE = auto()
231    MOD = auto()
232    NATURAL = auto()
233    NEXT = auto()
234    NO_ACTION = auto()
235    NOTNULL = auto()
236    NULL = auto()
237    NULLS_FIRST = auto()
238    NULLS_LAST = auto()
239    OFFSET = auto()
240    ON = auto()
241    ONLY = auto()
242    OPTIONS = auto()
243    ORDER_BY = auto()
244    ORDERED = auto()
245    ORDINALITY = auto()
246    OUTER = auto()
247    OUT_OF = auto()
248    OVER = auto()
249    OVERLAPS = auto()
250    OVERWRITE = auto()
251    PARTITION = auto()
252    PARTITION_BY = auto()
253    PERCENT = auto()
254    PIVOT = auto()
255    PLACEHOLDER = auto()
256    PRAGMA = auto()
257    PRECEDING = auto()
258    PRIMARY_KEY = auto()
259    PROCEDURE = auto()
260    PROPERTIES = auto()
261    PSEUDO_TYPE = auto()
262    QUALIFY = auto()
263    QUOTE = auto()
264    RANGE = auto()
265    RECURSIVE = auto()
266    REPLACE = auto()
267    RESPECT_NULLS = auto()
268    RETURNING = auto()
269    REFERENCES = auto()
270    RIGHT = auto()
271    RLIKE = auto()
272    ROLLBACK = auto()
273    ROLLUP = auto()
274    ROW = auto()
275    ROWS = auto()
276    SEED = auto()
277    SELECT = auto()
278    SEMI = auto()
279    SEPARATOR = auto()
280    SERDE_PROPERTIES = auto()
281    SET = auto()
282    SHOW = auto()
283    SIMILAR_TO = auto()
284    SOME = auto()
285    SORTKEY = auto()
286    SORT_BY = auto()
287    STRUCT = auto()
288    TABLE_SAMPLE = auto()
289    TEMPORARY = auto()
290    TOP = auto()
291    THEN = auto()
292    TRAILING = auto()
293    TRUE = auto()
294    UNBOUNDED = auto()
295    UNCACHE = auto()
296    UNION = auto()
297    UNLOGGED = auto()
298    UNNEST = auto()
299    UNPIVOT = auto()
300    UPDATE = auto()
301    USE = auto()
302    USING = auto()
303    VALUES = auto()
304    VIEW = auto()
305    VOLATILE = auto()
306    WHEN = auto()
307    WHERE = auto()
308    WINDOW = auto()
309    WITH = auto()
310    WITH_TIME_ZONE = auto()
311    WITH_LOCAL_TIME_ZONE = auto()
312    WITHIN_GROUP = auto()
313    WITHOUT_TIME_ZONE = auto()
314    UNIQUE = auto()

An enumeration.

L_PAREN = <TokenType.L_PAREN: 'L_PAREN'>
R_PAREN = <TokenType.R_PAREN: 'R_PAREN'>
L_BRACKET = <TokenType.L_BRACKET: 'L_BRACKET'>
R_BRACKET = <TokenType.R_BRACKET: 'R_BRACKET'>
L_BRACE = <TokenType.L_BRACE: 'L_BRACE'>
R_BRACE = <TokenType.R_BRACE: 'R_BRACE'>
COMMA = <TokenType.COMMA: 'COMMA'>
DOT = <TokenType.DOT: 'DOT'>
DASH = <TokenType.DASH: 'DASH'>
PLUS = <TokenType.PLUS: 'PLUS'>
COLON = <TokenType.COLON: 'COLON'>
DCOLON = <TokenType.DCOLON: 'DCOLON'>
SEMICOLON = <TokenType.SEMICOLON: 'SEMICOLON'>
STAR = <TokenType.STAR: 'STAR'>
BACKSLASH = <TokenType.BACKSLASH: 'BACKSLASH'>
SLASH = <TokenType.SLASH: 'SLASH'>
LT = <TokenType.LT: 'LT'>
LTE = <TokenType.LTE: 'LTE'>
GT = <TokenType.GT: 'GT'>
GTE = <TokenType.GTE: 'GTE'>
NOT = <TokenType.NOT: 'NOT'>
EQ = <TokenType.EQ: 'EQ'>
NEQ = <TokenType.NEQ: 'NEQ'>
NULLSAFE_EQ = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>
AND = <TokenType.AND: 'AND'>
OR = <TokenType.OR: 'OR'>
AMP = <TokenType.AMP: 'AMP'>
DPIPE = <TokenType.DPIPE: 'DPIPE'>
PIPE = <TokenType.PIPE: 'PIPE'>
CARET = <TokenType.CARET: 'CARET'>
TILDA = <TokenType.TILDA: 'TILDA'>
ARROW = <TokenType.ARROW: 'ARROW'>
DARROW = <TokenType.DARROW: 'DARROW'>
FARROW = <TokenType.FARROW: 'FARROW'>
HASH = <TokenType.HASH: 'HASH'>
HASH_ARROW = <TokenType.HASH_ARROW: 'HASH_ARROW'>
DHASH_ARROW = <TokenType.DHASH_ARROW: 'DHASH_ARROW'>
LR_ARROW = <TokenType.LR_ARROW: 'LR_ARROW'>
LT_AT = <TokenType.LT_AT: 'LT_AT'>
AT_GT = <TokenType.AT_GT: 'AT_GT'>
DOLLAR = <TokenType.DOLLAR: 'DOLLAR'>
PARAMETER = <TokenType.PARAMETER: 'PARAMETER'>
SESSION_PARAMETER = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>
NATIONAL = <TokenType.NATIONAL: 'NATIONAL'>
DAMP = <TokenType.DAMP: 'DAMP'>
BLOCK_START = <TokenType.BLOCK_START: 'BLOCK_START'>
BLOCK_END = <TokenType.BLOCK_END: 'BLOCK_END'>
SPACE = <TokenType.SPACE: 'SPACE'>
BREAK = <TokenType.BREAK: 'BREAK'>
STRING = <TokenType.STRING: 'STRING'>
NUMBER = <TokenType.NUMBER: 'NUMBER'>
IDENTIFIER = <TokenType.IDENTIFIER: 'IDENTIFIER'>
DATABASE = <TokenType.DATABASE: 'DATABASE'>
COLUMN = <TokenType.COLUMN: 'COLUMN'>
COLUMN_DEF = <TokenType.COLUMN_DEF: 'COLUMN_DEF'>
SCHEMA = <TokenType.SCHEMA: 'SCHEMA'>
TABLE = <TokenType.TABLE: 'TABLE'>
VAR = <TokenType.VAR: 'VAR'>
BIT_STRING = <TokenType.BIT_STRING: 'BIT_STRING'>
HEX_STRING = <TokenType.HEX_STRING: 'HEX_STRING'>
BYTE_STRING = <TokenType.BYTE_STRING: 'BYTE_STRING'>
BIT = <TokenType.BIT: 'BIT'>
BOOLEAN = <TokenType.BOOLEAN: 'BOOLEAN'>
TINYINT = <TokenType.TINYINT: 'TINYINT'>
UTINYINT = <TokenType.UTINYINT: 'UTINYINT'>
SMALLINT = <TokenType.SMALLINT: 'SMALLINT'>
USMALLINT = <TokenType.USMALLINT: 'USMALLINT'>
INT = <TokenType.INT: 'INT'>
UINT = <TokenType.UINT: 'UINT'>
BIGINT = <TokenType.BIGINT: 'BIGINT'>
UBIGINT = <TokenType.UBIGINT: 'UBIGINT'>
FLOAT = <TokenType.FLOAT: 'FLOAT'>
DOUBLE = <TokenType.DOUBLE: 'DOUBLE'>
DECIMAL = <TokenType.DECIMAL: 'DECIMAL'>
CHAR = <TokenType.CHAR: 'CHAR'>
NCHAR = <TokenType.NCHAR: 'NCHAR'>
VARCHAR = <TokenType.VARCHAR: 'VARCHAR'>
NVARCHAR = <TokenType.NVARCHAR: 'NVARCHAR'>
TEXT = <TokenType.TEXT: 'TEXT'>
MEDIUMTEXT = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>
LONGTEXT = <TokenType.LONGTEXT: 'LONGTEXT'>
MEDIUMBLOB = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>
LONGBLOB = <TokenType.LONGBLOB: 'LONGBLOB'>
BINARY = <TokenType.BINARY: 'BINARY'>
VARBINARY = <TokenType.VARBINARY: 'VARBINARY'>
JSON = <TokenType.JSON: 'JSON'>
JSONB = <TokenType.JSONB: 'JSONB'>
TIME = <TokenType.TIME: 'TIME'>
TIMESTAMP = <TokenType.TIMESTAMP: 'TIMESTAMP'>
TIMESTAMPTZ = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>
TIMESTAMPLTZ = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>
DATETIME = <TokenType.DATETIME: 'DATETIME'>
DATE = <TokenType.DATE: 'DATE'>
UUID = <TokenType.UUID: 'UUID'>
GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>
NULLABLE = <TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'>
HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE = <TokenType.HSTORE: 'HSTORE'>
SUPER = <TokenType.SUPER: 'SUPER'>
SERIAL = <TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'>
XML = <TokenType.XML: 'XML'>
UNIQUEIDENTIFIER = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>
MONEY = <TokenType.MONEY: 'MONEY'>
SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE = <TokenType.IMAGE: 'IMAGE'>
VARIANT = <TokenType.VARIANT: 'VARIANT'>
OBJECT = <TokenType.OBJECT: 'OBJECT'>
INET = <TokenType.INET: 'INET'>
ALIAS = <TokenType.ALIAS: 'ALIAS'>
ALTER = <TokenType.ALTER: 'ALTER'>
ALWAYS = <TokenType.ALWAYS: 'ALWAYS'>
ALL = <TokenType.ALL: 'ALL'>
ANTI = <TokenType.ANTI: 'ANTI'>
ANY = <TokenType.ANY: 'ANY'>
APPLY = <TokenType.APPLY: 'APPLY'>
ARRAY = <TokenType.ARRAY: 'ARRAY'>
ASC = <TokenType.ASC: 'ASC'>
ASOF = <TokenType.ASOF: 'ASOF'>
AT_TIME_ZONE = <TokenType.AT_TIME_ZONE: 'AT_TIME_ZONE'>
AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN = <TokenType.BEGIN: 'BEGIN'>
BETWEEN = <TokenType.BETWEEN: 'BETWEEN'>
BOTH = <TokenType.BOTH: 'BOTH'>
BUCKET = <TokenType.BUCKET: 'BUCKET'>
BY_DEFAULT = <TokenType.BY_DEFAULT: 'BY_DEFAULT'>
CACHE = <TokenType.CACHE: 'CACHE'>
CASCADE = <TokenType.CASCADE: 'CASCADE'>
CASE = <TokenType.CASE: 'CASE'>
CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY = <TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE = <TokenType.COLLATE: 'COLLATE'>
COMMAND = <TokenType.COMMAND: 'COMMAND'>
COMMENT = <TokenType.COMMENT: 'COMMENT'>
COMMIT = <TokenType.COMMIT: 'COMMIT'>
COMPOUND = <TokenType.COMPOUND: 'COMPOUND'>
CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'>
CREATE = <TokenType.CREATE: 'CREATE'>
CROSS = <TokenType.CROSS: 'CROSS'>
CUBE = <TokenType.CUBE: 'CUBE'>
CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_ROW = <TokenType.CURRENT_ROW: 'CURRENT_ROW'>
CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
DEFAULT = <TokenType.DEFAULT: 'DEFAULT'>
DELETE = <TokenType.DELETE: 'DELETE'>
DESC = <TokenType.DESC: 'DESC'>
DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'>
DISTINCT = <TokenType.DISTINCT: 'DISTINCT'>
DISTINCT_FROM = <TokenType.DISTINCT_FROM: 'DISTINCT_FROM'>
DISTRIBUTE_BY = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV = <TokenType.DIV: 'DIV'>
DROP = <TokenType.DROP: 'DROP'>
ELSE = <TokenType.ELSE: 'ELSE'>
END = <TokenType.END: 'END'>
ESCAPE = <TokenType.ESCAPE: 'ESCAPE'>
EXCEPT = <TokenType.EXCEPT: 'EXCEPT'>
EXECUTE = <TokenType.EXECUTE: 'EXECUTE'>
EXISTS = <TokenType.EXISTS: 'EXISTS'>
FALSE = <TokenType.FALSE: 'FALSE'>
FETCH = <TokenType.FETCH: 'FETCH'>
FILTER = <TokenType.FILTER: 'FILTER'>
FINAL = <TokenType.FINAL: 'FINAL'>
FIRST = <TokenType.FIRST: 'FIRST'>
FOLLOWING = <TokenType.FOLLOWING: 'FOLLOWING'>
FOR = <TokenType.FOR: 'FOR'>
FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT = <TokenType.FORMAT: 'FORMAT'>
FROM = <TokenType.FROM: 'FROM'>
FULL = <TokenType.FULL: 'FULL'>
FUNCTION = <TokenType.FUNCTION: 'FUNCTION'>
GLOB = <TokenType.GLOB: 'GLOB'>
GLOBAL = <TokenType.GLOBAL: 'GLOBAL'>
GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING = <TokenType.HAVING: 'HAVING'>
HINT = <TokenType.HINT: 'HINT'>
IF = <TokenType.IF: 'IF'>
IGNORE_NULLS = <TokenType.IGNORE_NULLS: 'IGNORE_NULLS'>
ILIKE = <TokenType.ILIKE: 'ILIKE'>
ILIKE_ANY = <TokenType.ILIKE_ANY: 'ILIKE_ANY'>
IN = <TokenType.IN: 'IN'>
INDEX = <TokenType.INDEX: 'INDEX'>
INNER = <TokenType.INNER: 'INNER'>
INSERT = <TokenType.INSERT: 'INSERT'>
INTERSECT = <TokenType.INTERSECT: 'INTERSECT'>
INTERVAL = <TokenType.INTERVAL: 'INTERVAL'>
INTO = <TokenType.INTO: 'INTO'>
INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE = <TokenType.IRLIKE: 'IRLIKE'>
IS = <TokenType.IS: 'IS'>
ISNULL = <TokenType.ISNULL: 'ISNULL'>
JOIN = <TokenType.JOIN: 'JOIN'>
JOIN_MARKER = <TokenType.JOIN_MARKER: 'JOIN_MARKER'>
LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL = <TokenType.LATERAL: 'LATERAL'>
LAZY = <TokenType.LAZY: 'LAZY'>
LEADING = <TokenType.LEADING: 'LEADING'>
LEFT = <TokenType.LEFT: 'LEFT'>
LIKE = <TokenType.LIKE: 'LIKE'>
LIKE_ANY = <TokenType.LIKE_ANY: 'LIKE_ANY'>
LIMIT = <TokenType.LIMIT: 'LIMIT'>
LOAD_DATA = <TokenType.LOAD_DATA: 'LOAD_DATA'>
LOCAL = <TokenType.LOCAL: 'LOCAL'>
MAP = <TokenType.MAP: 'MAP'>
MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MATERIALIZED = <TokenType.MATERIALIZED: 'MATERIALIZED'>
MERGE = <TokenType.MERGE: 'MERGE'>
MOD = <TokenType.MOD: 'MOD'>
NATURAL = <TokenType.NATURAL: 'NATURAL'>
NEXT = <TokenType.NEXT: 'NEXT'>
NO_ACTION = <TokenType.NO_ACTION: 'NO_ACTION'>
NOTNULL = <TokenType.NOTNULL: 'NOTNULL'>
NULL = <TokenType.NULL: 'NULL'>
NULLS_FIRST = <TokenType.NULLS_FIRST: 'NULLS_FIRST'>
NULLS_LAST = <TokenType.NULLS_LAST: 'NULLS_LAST'>
OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
ONLY = <TokenType.ONLY: 'ONLY'>
OPTIONS = <TokenType.OPTIONS: 'OPTIONS'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUTER = <TokenType.OUTER: 'OUTER'>
OUT_OF = <TokenType.OUT_OF: 'OUT_OF'>
OVER = <TokenType.OVER: 'OVER'>
OVERLAPS = <TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRAGMA = <TokenType.PRAGMA: 'PRAGMA'>
PRECEDING = <TokenType.PRECEDING: 'PRECEDING'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RESPECT_NULLS = <TokenType.RESPECT_NULLS: 'RESPECT_NULLS'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SEED = <TokenType.SEED: 'SEED'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
SORTKEY = <TokenType.SORTKEY: 'SORTKEY'>
SORT_BY = <TokenType.SORT_BY: 'SORT_BY'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRAILING = <TokenType.TRAILING: 'TRAILING'>
TRUE = <TokenType.TRUE: 'TRUE'>
UNBOUNDED = <TokenType.UNBOUNDED: 'UNBOUNDED'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNLOGGED = <TokenType.UNLOGGED: 'UNLOGGED'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VIEW = <TokenType.VIEW: 'VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
WITH_TIME_ZONE = <TokenType.WITH_TIME_ZONE: 'WITH_TIME_ZONE'>
WITH_LOCAL_TIME_ZONE = <TokenType.WITH_LOCAL_TIME_ZONE: 'WITH_LOCAL_TIME_ZONE'>
WITHIN_GROUP = <TokenType.WITHIN_GROUP: 'WITHIN_GROUP'>
WITHOUT_TIME_ZONE = <TokenType.WITHOUT_TIME_ZONE: 'WITHOUT_TIME_ZONE'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
Inherited Members
enum.Enum
name
value
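
Since TokenType is built on AutoName, each member's value mirrors its name, as the listing above shows. A quick sketch of how members behave as plain enum values:

from sqlglot.tokens import TokenType

# AutoName makes value mirror name, so both accessors return "SELECT".
assert TokenType.SELECT.name == "SELECT"
assert TokenType.SELECT.value == "SELECT"

# Members are hashable enum values, so they work in set membership tests.
join_sides = {TokenType.LEFT, TokenType.RIGHT, TokenType.FULL}
print(TokenType.LEFT in join_sides)  # True
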
class Token:
317class Token:
318    __slots__ = ("token_type", "text", "line", "col", "comments")
319
320    @classmethod
321    def number(cls, number: int) -> Token:
322        """Returns a NUMBER token with `number` as its text."""
323        return cls(TokenType.NUMBER, str(number))
324
325    @classmethod
326    def string(cls, string: str) -> Token:
327        """Returns a STRING token with `string` as its text."""
328        return cls(TokenType.STRING, string)
329
330    @classmethod
331    def identifier(cls, identifier: str) -> Token:
332        """Returns an IDENTIFIER token with `identifier` as its text."""
333        return cls(TokenType.IDENTIFIER, identifier)
334
335    @classmethod
336    def var(cls, var: str) -> Token:
337        """Returns an VAR token with `var` as its text."""
338        return cls(TokenType.VAR, var)
339
340    def __init__(
341        self,
342        token_type: TokenType,
343        text: str,
344        line: int = 1,
345        col: int = 1,
346        comments: t.List[str] = [],
347    ) -> None:
348        self.token_type = token_type
349        self.text = text
350        self.line = line
351        self.col = col - len(text)
352        self.col = self.col if self.col > 1 else 1
353        self.comments = comments
354
355    def __repr__(self) -> str:
356        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
357        return f"<Token {attributes}>"
Token(token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, comments: List[str] = [])
340    def __init__(
341        self,
342        token_type: TokenType,
343        text: str,
344        line: int = 1,
345        col: int = 1,
346        comments: t.List[str] = [],
347    ) -> None:
348        self.token_type = token_type
349        self.text = text
350        self.line = line
351        self.col = col - len(text)
352        self.col = self.col if self.col > 1 else 1
353        self.comments = comments
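
Note that col is interpreted as the column just past the token text: the constructor stores col - len(text) and clamps the result to a minimum of 1 (lines 351-352 above). A small sketch:

from sqlglot.tokens import Token, TokenType

# col=8 points just past the 6-character text, so the stored
# starting column is 8 - len("SELECT") = 2.
tok = Token(TokenType.SELECT, "SELECT", line=1, col=8)
print(tok.col)  # 2

# The clamp on line 352 keeps col from dropping below 1.
print(Token(TokenType.VAR, "alias", col=3).col)  # 1

# Note: comments defaults to a single shared list, so pass your own
# list if you intend to mutate it after construction.
tok = Token(TokenType.VAR, "x", comments=["+ hint"])
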
@classmethod
def number(cls, number: int) -> sqlglot.tokens.Token:
320    @classmethod
321    def number(cls, number: int) -> Token:
322        """Returns a NUMBER token with `number` as its text."""
323        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> sqlglot.tokens.Token:
325    @classmethod
326    def string(cls, string: str) -> Token:
327        """Returns a STRING token with `string` as its text."""
328        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> sqlglot.tokens.Token:
330    @classmethod
331    def identifier(cls, identifier: str) -> Token:
332        """Returns an IDENTIFIER token with `identifier` as its text."""
333        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> sqlglot.tokens.Token:
335    @classmethod
336    def var(cls, var: str) -> Token:
337        """Returns an VAR token with `var` as its text."""
338        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.
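
Each factory method above is shorthand for the constructor with a fixed TokenType, for example:

from sqlglot.tokens import Token, TokenType

num = Token.number(42)            # same as Token(TokenType.NUMBER, "42")
s = Token.string("hello")         # same as Token(TokenType.STRING, "hello")
ident = Token.identifier("tbl")   # same as Token(TokenType.IDENTIFIER, "tbl")
var = Token.var("x")              # same as Token(TokenType.VAR, "x")

assert num.token_type is TokenType.NUMBER and num.text == "42"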

class Tokenizer:
 400class Tokenizer(metaclass=_Tokenizer):
 401    SINGLE_TOKENS = {
 402        "(": TokenType.L_PAREN,
 403        ")": TokenType.R_PAREN,
 404        "[": TokenType.L_BRACKET,
 405        "]": TokenType.R_BRACKET,
 406        "{": TokenType.L_BRACE,
 407        "}": TokenType.R_BRACE,
 408        "&": TokenType.AMP,
 409        "^": TokenType.CARET,
 410        ":": TokenType.COLON,
 411        ",": TokenType.COMMA,
 412        ".": TokenType.DOT,
 413        "-": TokenType.DASH,
 414        "=": TokenType.EQ,
 415        ">": TokenType.GT,
 416        "<": TokenType.LT,
 417        "%": TokenType.MOD,
 418        "!": TokenType.NOT,
 419        "|": TokenType.PIPE,
 420        "+": TokenType.PLUS,
 421        ";": TokenType.SEMICOLON,
 422        "/": TokenType.SLASH,
 423        "\\": TokenType.BACKSLASH,
 424        "*": TokenType.STAR,
 425        "~": TokenType.TILDA,
 426        "?": TokenType.PLACEHOLDER,
 427        "@": TokenType.PARAMETER,
 428        # used for breaking a var like x'y' but nothing else
 429        # the token type doesn't matter
 430        "'": TokenType.QUOTE,
 431        "`": TokenType.IDENTIFIER,
 432        '"': TokenType.IDENTIFIER,
 433        "#": TokenType.HASH,
 434    }
 435
 436    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 437
 438    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 439
 440    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 441
 442    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 443
 444    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 445
 446    STRING_ESCAPES = ["'"]
 447
 448    _STRING_ESCAPES: t.Set[str] = set()
 449
 450    IDENTIFIER_ESCAPES = ['"']
 451
 452    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 453
 454    KEYWORDS = {
 455        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 456        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 457        "{{+": TokenType.BLOCK_START,
 458        "{{-": TokenType.BLOCK_START,
 459        "+}}": TokenType.BLOCK_END,
 460        "-}}": TokenType.BLOCK_END,
 461        "/*+": TokenType.HINT,
 462        "==": TokenType.EQ,
 463        "::": TokenType.DCOLON,
 464        "||": TokenType.DPIPE,
 465        ">=": TokenType.GTE,
 466        "<=": TokenType.LTE,
 467        "<>": TokenType.NEQ,
 468        "!=": TokenType.NEQ,
 469        "<=>": TokenType.NULLSAFE_EQ,
 470        "->": TokenType.ARROW,
 471        "->>": TokenType.DARROW,
 472        "=>": TokenType.FARROW,
 473        "#>": TokenType.HASH_ARROW,
 474        "#>>": TokenType.DHASH_ARROW,
 475        "<->": TokenType.LR_ARROW,
 476        "&&": TokenType.DAMP,
 477        "ALL": TokenType.ALL,
 478        "ALWAYS": TokenType.ALWAYS,
 479        "AND": TokenType.AND,
 480        "ANTI": TokenType.ANTI,
 481        "ANY": TokenType.ANY,
 482        "ASC": TokenType.ASC,
 483        "AS": TokenType.ALIAS,
 484        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
 485        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 486        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 487        "BEGIN": TokenType.BEGIN,
 488        "BETWEEN": TokenType.BETWEEN,
 489        "BOTH": TokenType.BOTH,
 490        "BUCKET": TokenType.BUCKET,
 491        "BY DEFAULT": TokenType.BY_DEFAULT,
 492        "CACHE": TokenType.CACHE,
 493        "UNCACHE": TokenType.UNCACHE,
 494        "CASE": TokenType.CASE,
 495        "CASCADE": TokenType.CASCADE,
 496        "CHARACTER SET": TokenType.CHARACTER_SET,
 497        "CLUSTER BY": TokenType.CLUSTER_BY,
 498        "COLLATE": TokenType.COLLATE,
 499        "COLUMN": TokenType.COLUMN,
 500        "COMMIT": TokenType.COMMIT,
 501        "COMPOUND": TokenType.COMPOUND,
 502        "CONSTRAINT": TokenType.CONSTRAINT,
 503        "CREATE": TokenType.CREATE,
 504        "CROSS": TokenType.CROSS,
 505        "CUBE": TokenType.CUBE,
 506        "CURRENT_DATE": TokenType.CURRENT_DATE,
 507        "CURRENT ROW": TokenType.CURRENT_ROW,
 508        "CURRENT_TIME": TokenType.CURRENT_TIME,
 509        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 510        "DATABASE": TokenType.DATABASE,
 511        "DEFAULT": TokenType.DEFAULT,
 512        "DELETE": TokenType.DELETE,
 513        "DESC": TokenType.DESC,
 514        "DESCRIBE": TokenType.DESCRIBE,
 515        "DISTINCT": TokenType.DISTINCT,
 516        "DISTINCT FROM": TokenType.DISTINCT_FROM,
 517        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 518        "DIV": TokenType.DIV,
 519        "DROP": TokenType.DROP,
 520        "ELSE": TokenType.ELSE,
 521        "END": TokenType.END,
 522        "ESCAPE": TokenType.ESCAPE,
 523        "EXCEPT": TokenType.EXCEPT,
 524        "EXECUTE": TokenType.EXECUTE,
 525        "EXISTS": TokenType.EXISTS,
 526        "FALSE": TokenType.FALSE,
 527        "FETCH": TokenType.FETCH,
 528        "FILTER": TokenType.FILTER,
 529        "FIRST": TokenType.FIRST,
 530        "FULL": TokenType.FULL,
 531        "FUNCTION": TokenType.FUNCTION,
 532        "FOLLOWING": TokenType.FOLLOWING,
 533        "FOR": TokenType.FOR,
 534        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 535        "FORMAT": TokenType.FORMAT,
 536        "FROM": TokenType.FROM,
 537        "GLOB": TokenType.GLOB,
 538        "GROUP BY": TokenType.GROUP_BY,
 539        "GROUPING SETS": TokenType.GROUPING_SETS,
 540        "HAVING": TokenType.HAVING,
 541        "IF": TokenType.IF,
 542        "ILIKE": TokenType.ILIKE,
 543        "IGNORE NULLS": TokenType.IGNORE_NULLS,
 544        "IN": TokenType.IN,
 545        "INDEX": TokenType.INDEX,
 546        "INET": TokenType.INET,
 547        "INNER": TokenType.INNER,
 548        "INSERT": TokenType.INSERT,
 549        "INTERVAL": TokenType.INTERVAL,
 550        "INTERSECT": TokenType.INTERSECT,
 551        "INTO": TokenType.INTO,
 552        "IS": TokenType.IS,
 553        "ISNULL": TokenType.ISNULL,
 554        "JOIN": TokenType.JOIN,
 555        "LATERAL": TokenType.LATERAL,
 556        "LAZY": TokenType.LAZY,
 557        "LEADING": TokenType.LEADING,
 558        "LEFT": TokenType.LEFT,
 559        "LIKE": TokenType.LIKE,
 560        "LIMIT": TokenType.LIMIT,
 561        "LOAD DATA": TokenType.LOAD_DATA,
 562        "LOCAL": TokenType.LOCAL,
 563        "MATERIALIZED": TokenType.MATERIALIZED,
 564        "MERGE": TokenType.MERGE,
 565        "NATURAL": TokenType.NATURAL,
 566        "NEXT": TokenType.NEXT,
 567        "NO ACTION": TokenType.NO_ACTION,
 568        "NOT": TokenType.NOT,
 569        "NOTNULL": TokenType.NOTNULL,
 570        "NULL": TokenType.NULL,
 571        "NULLS FIRST": TokenType.NULLS_FIRST,
 572        "NULLS LAST": TokenType.NULLS_LAST,
 573        "OBJECT": TokenType.OBJECT,
 574        "OFFSET": TokenType.OFFSET,
 575        "ON": TokenType.ON,
 576        "ONLY": TokenType.ONLY,
 577        "OPTIONS": TokenType.OPTIONS,
 578        "OR": TokenType.OR,
 579        "ORDER BY": TokenType.ORDER_BY,
 580        "ORDINALITY": TokenType.ORDINALITY,
 581        "OUTER": TokenType.OUTER,
 582        "OUT OF": TokenType.OUT_OF,
 583        "OVER": TokenType.OVER,
 584        "OVERLAPS": TokenType.OVERLAPS,
 585        "OVERWRITE": TokenType.OVERWRITE,
 586        "PARTITION": TokenType.PARTITION,
 587        "PARTITION BY": TokenType.PARTITION_BY,
 588        "PARTITIONED BY": TokenType.PARTITION_BY,
 589        "PARTITIONED_BY": TokenType.PARTITION_BY,
 590        "PERCENT": TokenType.PERCENT,
 591        "PIVOT": TokenType.PIVOT,
 592        "PRAGMA": TokenType.PRAGMA,
 593        "PRECEDING": TokenType.PRECEDING,
 594        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 595        "PROCEDURE": TokenType.PROCEDURE,
 596        "QUALIFY": TokenType.QUALIFY,
 597        "RANGE": TokenType.RANGE,
 598        "RECURSIVE": TokenType.RECURSIVE,
 599        "REGEXP": TokenType.RLIKE,
 600        "REPLACE": TokenType.REPLACE,
 601        "RESPECT NULLS": TokenType.RESPECT_NULLS,
 602        "REFERENCES": TokenType.REFERENCES,
 603        "RIGHT": TokenType.RIGHT,
 604        "RLIKE": TokenType.RLIKE,
 605        "ROLLBACK": TokenType.ROLLBACK,
 606        "ROLLUP": TokenType.ROLLUP,
 607        "ROW": TokenType.ROW,
 608        "ROWS": TokenType.ROWS,
 609        "SCHEMA": TokenType.SCHEMA,
 610        "SEED": TokenType.SEED,
 611        "SELECT": TokenType.SELECT,
 612        "SEMI": TokenType.SEMI,
 613        "SET": TokenType.SET,
 614        "SHOW": TokenType.SHOW,
 615        "SIMILAR TO": TokenType.SIMILAR_TO,
 616        "SOME": TokenType.SOME,
 617        "SORTKEY": TokenType.SORTKEY,
 618        "SORT BY": TokenType.SORT_BY,
 619        "TABLE": TokenType.TABLE,
 620        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 621        "TEMP": TokenType.TEMPORARY,
 622        "TEMPORARY": TokenType.TEMPORARY,
 623        "THEN": TokenType.THEN,
 624        "TRUE": TokenType.TRUE,
 625        "TRAILING": TokenType.TRAILING,
 626        "UNBOUNDED": TokenType.UNBOUNDED,
 627        "UNION": TokenType.UNION,
 628        "UNLOGGED": TokenType.UNLOGGED,
 629        "UNNEST": TokenType.UNNEST,
 630        "UNPIVOT": TokenType.UNPIVOT,
 631        "UPDATE": TokenType.UPDATE,
 632        "USE": TokenType.USE,
 633        "USING": TokenType.USING,
 634        "VALUES": TokenType.VALUES,
 635        "VIEW": TokenType.VIEW,
 636        "VOLATILE": TokenType.VOLATILE,
 637        "WHEN": TokenType.WHEN,
 638        "WHERE": TokenType.WHERE,
 639        "WINDOW": TokenType.WINDOW,
 640        "WITH": TokenType.WITH,
 641        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
 642        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
 643        "WITHIN GROUP": TokenType.WITHIN_GROUP,
 644        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
 645        "APPLY": TokenType.APPLY,
 646        "ARRAY": TokenType.ARRAY,
 647        "BIT": TokenType.BIT,
 648        "BOOL": TokenType.BOOLEAN,
 649        "BOOLEAN": TokenType.BOOLEAN,
 650        "BYTE": TokenType.TINYINT,
 651        "TINYINT": TokenType.TINYINT,
 652        "SHORT": TokenType.SMALLINT,
 653        "SMALLINT": TokenType.SMALLINT,
 654        "INT2": TokenType.SMALLINT,
 655        "INTEGER": TokenType.INT,
 656        "INT": TokenType.INT,
 657        "INT4": TokenType.INT,
 658        "LONG": TokenType.BIGINT,
 659        "BIGINT": TokenType.BIGINT,
 660        "INT8": TokenType.BIGINT,
 661        "DEC": TokenType.DECIMAL,
 662        "DECIMAL": TokenType.DECIMAL,
 663        "MAP": TokenType.MAP,
 664        "NULLABLE": TokenType.NULLABLE,
 665        "NUMBER": TokenType.DECIMAL,
 666        "NUMERIC": TokenType.DECIMAL,
 667        "FIXED": TokenType.DECIMAL,
 668        "REAL": TokenType.FLOAT,
 669        "FLOAT": TokenType.FLOAT,
 670        "FLOAT4": TokenType.FLOAT,
 671        "FLOAT8": TokenType.DOUBLE,
 672        "DOUBLE": TokenType.DOUBLE,
 673        "DOUBLE PRECISION": TokenType.DOUBLE,
 674        "JSON": TokenType.JSON,
 675        "CHAR": TokenType.CHAR,
 676        "CHARACTER": TokenType.CHAR,
 677        "NCHAR": TokenType.NCHAR,
 678        "VARCHAR": TokenType.VARCHAR,
 679        "VARCHAR2": TokenType.VARCHAR,
 680        "NVARCHAR": TokenType.NVARCHAR,
 681        "NVARCHAR2": TokenType.NVARCHAR,
 682        "STR": TokenType.TEXT,
 683        "STRING": TokenType.TEXT,
 684        "TEXT": TokenType.TEXT,
 685        "CLOB": TokenType.TEXT,
 686        "LONGVARCHAR": TokenType.TEXT,
 687        "BINARY": TokenType.BINARY,
 688        "BLOB": TokenType.VARBINARY,
 689        "BYTEA": TokenType.VARBINARY,
 690        "VARBINARY": TokenType.VARBINARY,
 691        "TIME": TokenType.TIME,
 692        "TIMESTAMP": TokenType.TIMESTAMP,
 693        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 694        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 695        "DATE": TokenType.DATE,
 696        "DATETIME": TokenType.DATETIME,
 697        "UNIQUE": TokenType.UNIQUE,
 698        "STRUCT": TokenType.STRUCT,
 699        "VARIANT": TokenType.VARIANT,
 700        "ALTER": TokenType.ALTER,
 701        "ALTER AGGREGATE": TokenType.COMMAND,
 702        "ALTER DEFAULT": TokenType.COMMAND,
 703        "ALTER DOMAIN": TokenType.COMMAND,
 704        "ALTER ROLE": TokenType.COMMAND,
 705        "ALTER RULE": TokenType.COMMAND,
 706        "ALTER SEQUENCE": TokenType.COMMAND,
 707        "ALTER TYPE": TokenType.COMMAND,
 708        "ALTER USER": TokenType.COMMAND,
 709        "ALTER VIEW": TokenType.COMMAND,
 710        "ANALYZE": TokenType.COMMAND,
 711        "CALL": TokenType.COMMAND,
 712        "COMMENT": TokenType.COMMENT,
 713        "COPY": TokenType.COMMAND,
 714        "EXPLAIN": TokenType.COMMAND,
 715        "GRANT": TokenType.COMMAND,
 716        "OPTIMIZE": TokenType.COMMAND,
 717        "PREPARE": TokenType.COMMAND,
 718        "TRUNCATE": TokenType.COMMAND,
 719        "VACUUM": TokenType.COMMAND,
 720    }
 721
 722    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 723        " ": TokenType.SPACE,
 724        "\t": TokenType.SPACE,
 725        "\n": TokenType.BREAK,
 726        "\r": TokenType.BREAK,
 727        "\r\n": TokenType.BREAK,
 728    }
 729
 730    COMMANDS = {
 731        TokenType.COMMAND,
 732        TokenType.EXECUTE,
 733        TokenType.FETCH,
 734        TokenType.SHOW,
 735    }
 736
 737    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 738
 739    # handle numeric literals like in hive (3L = BIGINT)
 740    NUMERIC_LITERALS: t.Dict[str, str] = {}
 741    ENCODE: t.Optional[str] = None
 742
 743    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
 744    KEYWORD_TRIE = None  # autofilled
 745
 746    IDENTIFIER_CAN_START_WITH_DIGIT = False
 747
 748    __slots__ = (
 749        "sql",
 750        "size",
 751        "tokens",
 752        "_start",
 753        "_current",
 754        "_line",
 755        "_col",
 756        "_comments",
 757        "_char",
 758        "_end",
 759        "_peek",
 760        "_prev_token_line",
 761        "_prev_token_comments",
 762        "_prev_token_type",
 763    )
 764
 765    def __init__(self) -> None:
 766        self.reset()
 767
 768    def reset(self) -> None:
 769        self.sql = ""
 770        self.size = 0
 771        self.tokens: t.List[Token] = []
 772        self._start = 0
 773        self._current = 0
 774        self._line = 1
 775        self._col = 1
 776        self._comments: t.List[str] = []
 777
 778        self._char = None
 779        self._end = None
 780        self._peek = None
 781        self._prev_token_line = -1
 782        self._prev_token_comments: t.List[str] = []
 783        self._prev_token_type = None
 784
 785    def tokenize(self, sql: str) -> t.List[Token]:
 786        """Returns a list of tokens corresponding to the SQL string `sql`."""
 787        self.reset()
 788        self.sql = sql
 789        self.size = len(sql)
 790        self._scan()
 791        return self.tokens
 792
 793    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 794        while self.size and not self._end:
 795            self._start = self._current
 796            self._advance()
 797
 798            if self._char is None:
 799                break
 800
 801            if self._char not in self.WHITE_SPACE:
 802                if self._char.isdigit():
 803                    self._scan_number()
 804                elif self._char in self._IDENTIFIERS:
 805                    self._scan_identifier(self._IDENTIFIERS[self._char])
 806                else:
 807                    self._scan_keywords()
 808
 809            if until and until():
 810                break
 811
 812    def _chars(self, size: int) -> str:
 813        if size == 1:
 814            return self._char  # type: ignore
 815        start = self._current - 1
 816        end = start + size
 817        if end <= self.size:
 818            return self.sql[start:end]
 819        return ""
 820
 821    def _advance(self, i: int = 1) -> None:
 822        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 823            self._set_new_line()
 824
 825        self._col += i
 826        self._current += i
 827        self._end = self._current >= self.size  # type: ignore
 828        self._char = self.sql[self._current - 1]  # type: ignore
 829        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore
 830
 831    def _set_new_line(self) -> None:
 832        self._col = 1
 833        self._line += 1
 834
 835    @property
 836    def _text(self) -> str:
 837        return self.sql[self._start : self._current]
 838
 839    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 840        self._prev_token_line = self._line
 841        self._prev_token_comments = self._comments
 842        self._prev_token_type = token_type  # type: ignore
 843        self.tokens.append(
 844            Token(
 845                token_type,
 846                self._text if text is None else text,
 847                self._line,
 848                self._col,
 849                self._comments,
 850            )
 851        )
 852        self._comments = []
 853
 854        # If we have either a semicolon or a begin token before the command's token, we'll parse
 855        # whatever follows the command's token as a string
 856        if (
 857            token_type in self.COMMANDS
 858            and self._peek != ";"
 859            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 860        ):
 861            start = self._current
 862            tokens = len(self.tokens)
 863            self._scan(lambda: self._peek == ";")
 864            self.tokens = self.tokens[:tokens]
 865            text = self.sql[start : self._current].strip()
 866            if text:
 867                self._add(TokenType.STRING, text)
 868
 869    def _scan_keywords(self) -> None:
 870        size = 0
 871        word = None
 872        chars = self._text
 873        char = chars
 874        prev_space = False
 875        skip = False
 876        trie = self.KEYWORD_TRIE
 877        single_token = char in self.SINGLE_TOKENS
 878
 879        while chars:
 880            if skip:
 881                result = 1
 882            else:
 883                result, trie = in_trie(trie, char.upper())  # type: ignore
 884
 885            if result == 0:
 886                break
 887            if result == 2:
 888                word = chars
 889            size += 1
 890            end = self._current - 1 + size
 891
 892            if end < self.size:
 893                char = self.sql[end]
 894                single_token = single_token or char in self.SINGLE_TOKENS
 895                is_space = char in self.WHITE_SPACE
 896
 897                if not is_space or not prev_space:
 898                    if is_space:
 899                        char = " "
 900                    chars += char
 901                    prev_space = is_space
 902                    skip = False
 903                else:
 904                    skip = True
 905            else:
 906                chars = " "
 907
 908        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 909
 910        if not word:
 911            if self._char in self.SINGLE_TOKENS:
 912                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
 913                return
 914            self._scan_var()
 915            return
 916
 917        if self._scan_string(word):
 918            return
 919        if self._scan_formatted_string(word):
 920            return
 921        if self._scan_comment(word):
 922            return
 923
 924        self._advance(size - 1)
 925        self._add(self.KEYWORDS[word.upper()])
 926
 927    def _scan_comment(self, comment_start: str) -> bool:
 928        if comment_start not in self._COMMENTS:  # type: ignore
 929            return False
 930
 931        comment_start_line = self._line
 932        comment_start_size = len(comment_start)
 933        comment_end = self._COMMENTS[comment_start]  # type: ignore
 934
 935        if comment_end:
 936            comment_end_size = len(comment_end)
 937
 938            while not self._end and self._chars(comment_end_size) != comment_end:
 939                self._advance()
 940
 941            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
 942            self._advance(comment_end_size - 1)
 943        else:
 944            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
 945                self._advance()
 946            self._comments.append(self._text[comment_start_size:])  # type: ignore
 947
 948        # A leading comment is attached to the succeeding token, while a trailing comment is attached to the preceding one.
 949        # Multiple consecutive comments are preserved by appending them to the current comments list.
 950        if comment_start_line == self._prev_token_line:
 951            self.tokens[-1].comments.extend(self._comments)
 952            self._comments = []
 953            self._prev_token_line = self._line
 954
 955        return True
 956
 957    def _scan_number(self) -> None:
 958        if self._char == "0":
 959            peek = self._peek.upper()  # type: ignore
 960            if peek == "B":
 961                return self._scan_bits()
 962            elif peek == "X":
 963                return self._scan_hex()
 964
 965        decimal = False
 966        scientific = 0
 967
 968        while True:
 969            if self._peek.isdigit():  # type: ignore
 970                self._advance()
 971            elif self._peek == "." and not decimal:
 972                decimal = True
 973                self._advance()
 974            elif self._peek in ("-", "+") and scientific == 1:
 975                scientific += 1
 976                self._advance()
 977            elif self._peek.upper() == "E" and not scientific:  # type: ignore
 978                scientific += 1
 979                self._advance()
 980            elif self._peek.isidentifier():  # type: ignore
 981                number_text = self._text
 982                literal = []
 983
 984                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
 985                    literal.append(self._peek.upper())  # type: ignore
 986                    self._advance()
 987
 988                literal = "".join(literal)  # type: ignore
 989                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore
 990
 991                if token_type:
 992                    self._add(TokenType.NUMBER, number_text)
 993                    self._add(TokenType.DCOLON, "::")
 994                    return self._add(token_type, literal)  # type: ignore
 995                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
 996                    return self._add(TokenType.VAR)
 997
 998                self._add(TokenType.NUMBER, number_text)
 999                return self._advance(-len(literal))
1000            else:
1001                return self._add(TokenType.NUMBER)
1002
1003    def _scan_bits(self) -> None:
1004        self._advance()
1005        value = self._extract_value()
1006        try:
1007            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
1008        except ValueError:
1009            self._add(TokenType.IDENTIFIER)
1010
1011    def _scan_hex(self) -> None:
1012        self._advance()
1013        value = self._extract_value()
1014        try:
1015            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
1016        except ValueError:
1017            self._add(TokenType.IDENTIFIER)
1018
1019    def _extract_value(self) -> str:
1020        while True:
1021            char = self._peek.strip()  # type: ignore
1022            if char and char not in self.SINGLE_TOKENS:
1023                self._advance()
1024            else:
1025                break
1026
1027        return self._text
1028
1029    def _scan_string(self, quote: str) -> bool:
1030        quote_end = self._QUOTES.get(quote)  # type: ignore
1031        if quote_end is None:
1032            return False
1033
1034        self._advance(len(quote))
1035        text = self._extract_string(quote_end)
1036        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
1037        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
1038        return True
1039
1040    # X'1234', b'0110', E'\\\\\' etc.
1041    def _scan_formatted_string(self, string_start: str) -> bool:
1042        if string_start in self._HEX_STRINGS:  # type: ignore
1043            delimiters = self._HEX_STRINGS  # type: ignore
1044            token_type = TokenType.HEX_STRING
1045            base = 16
1046        elif string_start in self._BIT_STRINGS:  # type: ignore
1047            delimiters = self._BIT_STRINGS  # type: ignore
1048            token_type = TokenType.BIT_STRING
1049            base = 2
1050        elif string_start in self._BYTE_STRINGS:  # type: ignore
1051            delimiters = self._BYTE_STRINGS  # type: ignore
1052            token_type = TokenType.BYTE_STRING
1053            base = None
1054        else:
1055            return False
1056
1057        self._advance(len(string_start))
1058        string_end = delimiters.get(string_start)
1059        text = self._extract_string(string_end)
1060
1061        if base is None:
1062            self._add(token_type, text)
1063        else:
1064            try:
1065                self._add(token_type, f"{int(text, base)}")
1066            except ValueError:
1067                raise RuntimeError(
1068                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1069                )
1070
1071        return True
1072
1073    def _scan_identifier(self, identifier_end: str) -> None:
1074        text = ""
1075        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES
1076
1077        while True:
1078            if self._end:
1079                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")
1080
1081            self._advance()
1082            if self._char == identifier_end:
1083                if identifier_end_is_escape and self._peek == identifier_end:
1084                    text += identifier_end  # type: ignore
1085                    self._advance()
1086                    continue
1087
1088                break
1089
1090            text += self._char  # type: ignore
1091
1092        self._add(TokenType.IDENTIFIER, text)
1093
1094    def _scan_var(self) -> None:
1095        while True:
1096            char = self._peek.strip()  # type: ignore
1097            if char and char not in self.SINGLE_TOKENS:
1098                self._advance()
1099            else:
1100                break
1101        self._add(
1102            TokenType.VAR
1103            if self._prev_token_type == TokenType.PARAMETER
1104            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1105        )
1106
1107    def _extract_string(self, delimiter: str) -> str:
1108        text = ""
1109        delim_size = len(delimiter)
1110
1111        while True:
1112            if self._char in self._STRING_ESCAPES and (
1113                self._peek == delimiter or self._peek in self._STRING_ESCAPES
1114            ):
1115                if self._peek == delimiter:
1116                    text += self._peek  # type: ignore
1117                else:
1118                    text += self._char + self._peek  # type: ignore
1119
1120                if self._current + 1 < self.size:
1121                    self._advance(2)
1122                else:
1123                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1124            else:
1125                if self._chars(delim_size) == delimiter:
1126                    if delim_size > 1:
1127                        self._advance(delim_size - 1)
1128                    break
1129
1130                if self._end:
1131                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1132                text += self._char  # type: ignore
1133                self._advance()
1134
1135        return text
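
The class-level attributes above (QUOTES, IDENTIFIERS, KEYWORDS, NUMERIC_LITERALS, and so on) are the customization surface for dialects: the _Tokenizer metaclass derives the internal lookup tables (_QUOTES, _IDENTIFIERS, KEYWORD_TRIE, ...) from them per class. A minimal sketch of a hypothetical subclass, assuming the metaclass rebuilds those tables for subclasses the way it does for the built-in dialect tokenizers:

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    # Accept backticked identifiers in addition to the default double quotes.
    IDENTIFIERS = ['"', "`"]

    # Hive-style numeric suffixes: 3L tokenizes as NUMBER, DCOLON, BIGINT
    # (see _scan_number above).
    NUMERIC_LITERALS = {"L": "BIGINT"}

tokens = MyTokenizer().tokenize("SELECT `col` + 3L")
print([(t.token_type, t.text) for t in tokens])
# Roughly: SELECT 'SELECT', IDENTIFIER 'col', PLUS '+',
#          NUMBER '3', DCOLON '::', BIGINT 'L'
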
def reset(self) -> None:
768    def reset(self) -> None:
769        self.sql = ""
770        self.size = 0
771        self.tokens: t.List[Token] = []
772        self._start = 0
773        self._current = 0
774        self._line = 1
775        self._col = 1
776        self._comments: t.List[str] = []
777
778        self._char = None
779        self._end = None
780        self._peek = None
781        self._prev_token_line = -1
782        self._prev_token_comments: t.List[str] = []
783        self._prev_token_type = None
def tokenize(self, sql: str) -> List[sqlglot.tokens.Token]:
785    def tokenize(self, sql: str) -> t.List[Token]:
786        """Returns a list of tokens corresponding to the SQL string `sql`."""
787        self.reset()
788        self.sql = sql
789        self.size = len(sql)
790        self._scan()
791        return self.tokens

Returns a list of tokens corresponding to the SQL string sql.
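
Putting the pieces together, a sketch of a full pass through tokenize, including the comment-attachment behavior of _scan_comment shown above:

from sqlglot.tokens import Tokenizer

tokens = Tokenizer().tokenize("SELECT a + 1 -- trailing comment")
for token in tokens:
    print(token.token_type, repr(token.text), token.comments)
# TokenType.SELECT 'SELECT' []
# TokenType.VAR 'a' []
# TokenType.PLUS '+' []
# TokenType.NUMBER '1' [' trailing comment']

Because the comment starts on the same line as the last emitted token, _scan_comment extends that token's comments rather than buffering the text for a following token.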