
sqlglot.tokens
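
sqlglot.tokens implements the SQL tokenizer. TokenType enumerates every kind of token, Token carries a token's type, text, and source position, and Tokenizer turns a SQL string into a list of Tokens. Dialects customize tokenization by subclassing Tokenizer and overriding class attributes such as QUOTES, IDENTIFIERS, and KEYWORDS; the _Tokenizer metaclass then derives the internal lookup structures (_QUOTES, _FORMAT_STRINGS, _KEYWORD_TRIE, and friends) from those attributes.

A minimal usage sketch (the SQL string is illustrative):

    from sqlglot.tokens import Tokenizer

    for token in Tokenizer().tokenize("SELECT a FROM t"):
        print(token.token_type, repr(token.text), token.line, token.col)
    # Token types produced here: SELECT, VAR, FROM, VAR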

   1from __future__ import annotations
   2
   3import typing as t
   4from enum import auto
   5
   6from sqlglot.helper import AutoName
   7from sqlglot.trie import in_trie, new_trie
   8
   9
  10class TokenType(AutoName):
  11    L_PAREN = auto()
  12    R_PAREN = auto()
  13    L_BRACKET = auto()
  14    R_BRACKET = auto()
  15    L_BRACE = auto()
  16    R_BRACE = auto()
  17    COMMA = auto()
  18    DOT = auto()
  19    DASH = auto()
  20    PLUS = auto()
  21    COLON = auto()
  22    DCOLON = auto()
  23    SEMICOLON = auto()
  24    STAR = auto()
  25    BACKSLASH = auto()
  26    SLASH = auto()
  27    LT = auto()
  28    LTE = auto()
  29    GT = auto()
  30    GTE = auto()
  31    NOT = auto()
  32    EQ = auto()
  33    NEQ = auto()
  34    NULLSAFE_EQ = auto()
  35    AND = auto()
  36    OR = auto()
  37    AMP = auto()
  38    DPIPE = auto()
  39    PIPE = auto()
  40    CARET = auto()
  41    TILDA = auto()
  42    ARROW = auto()
  43    DARROW = auto()
  44    FARROW = auto()
  45    HASH = auto()
  46    HASH_ARROW = auto()
  47    DHASH_ARROW = auto()
  48    LR_ARROW = auto()
  49    LT_AT = auto()
  50    AT_GT = auto()
  51    DOLLAR = auto()
  52    PARAMETER = auto()
  53    SESSION_PARAMETER = auto()
  54    DAMP = auto()
  55
  56    BLOCK_START = auto()
  57    BLOCK_END = auto()
  58
  59    SPACE = auto()
  60    BREAK = auto()
  61
  62    STRING = auto()
  63    NUMBER = auto()
  64    IDENTIFIER = auto()
  65    DATABASE = auto()
  66    COLUMN = auto()
  67    COLUMN_DEF = auto()
  68    SCHEMA = auto()
  69    TABLE = auto()
  70    VAR = auto()
  71    BIT_STRING = auto()
  72    HEX_STRING = auto()
  73    BYTE_STRING = auto()
  74    NATIONAL_STRING = auto()
  75    RAW_STRING = auto()
  76
  77    # types
  78    BIT = auto()
  79    BOOLEAN = auto()
  80    TINYINT = auto()
  81    UTINYINT = auto()
  82    SMALLINT = auto()
  83    USMALLINT = auto()
  84    INT = auto()
  85    UINT = auto()
  86    BIGINT = auto()
  87    UBIGINT = auto()
  88    INT128 = auto()
  89    UINT128 = auto()
  90    INT256 = auto()
  91    UINT256 = auto()
  92    FLOAT = auto()
  93    DOUBLE = auto()
  94    DECIMAL = auto()
  95    BIGDECIMAL = auto()
  96    CHAR = auto()
  97    NCHAR = auto()
  98    VARCHAR = auto()
  99    NVARCHAR = auto()
 100    TEXT = auto()
 101    MEDIUMTEXT = auto()
 102    LONGTEXT = auto()
 103    MEDIUMBLOB = auto()
 104    LONGBLOB = auto()
 105    BINARY = auto()
 106    VARBINARY = auto()
 107    JSON = auto()
 108    JSONB = auto()
 109    TIME = auto()
 110    TIMESTAMP = auto()
 111    TIMESTAMPTZ = auto()
 112    TIMESTAMPLTZ = auto()
 113    DATETIME = auto()
 114    DATETIME64 = auto()
 115    DATE = auto()
 116    INT4RANGE = auto()
 117    INT4MULTIRANGE = auto()
 118    INT8RANGE = auto()
 119    INT8MULTIRANGE = auto()
 120    NUMRANGE = auto()
 121    NUMMULTIRANGE = auto()
 122    TSRANGE = auto()
 123    TSMULTIRANGE = auto()
 124    TSTZRANGE = auto()
 125    TSTZMULTIRANGE = auto()
 126    DATERANGE = auto()
 127    DATEMULTIRANGE = auto()
 128    UUID = auto()
 129    GEOGRAPHY = auto()
 130    NULLABLE = auto()
 131    GEOMETRY = auto()
 132    HLLSKETCH = auto()
 133    HSTORE = auto()
 134    SUPER = auto()
 135    SERIAL = auto()
 136    SMALLSERIAL = auto()
 137    BIGSERIAL = auto()
 138    XML = auto()
 139    UNIQUEIDENTIFIER = auto()
 140    MONEY = auto()
 141    SMALLMONEY = auto()
 142    ROWVERSION = auto()
 143    IMAGE = auto()
 144    VARIANT = auto()
 145    OBJECT = auto()
 146    INET = auto()
 147    ENUM = auto()
 148
 149    # keywords
 150    ALIAS = auto()
 151    ALTER = auto()
 152    ALWAYS = auto()
 153    ALL = auto()
 154    ANTI = auto()
 155    ANY = auto()
 156    APPLY = auto()
 157    ARRAY = auto()
 158    ASC = auto()
 159    ASOF = auto()
 160    AUTO_INCREMENT = auto()
 161    BEGIN = auto()
 162    BETWEEN = auto()
 163    CACHE = auto()
 164    CASE = auto()
 165    CHARACTER_SET = auto()
 166    COLLATE = auto()
 167    COMMAND = auto()
 168    COMMENT = auto()
 169    COMMIT = auto()
 170    CONSTRAINT = auto()
 171    CREATE = auto()
 172    CROSS = auto()
 173    CUBE = auto()
 174    CURRENT_DATE = auto()
 175    CURRENT_DATETIME = auto()
 176    CURRENT_TIME = auto()
 177    CURRENT_TIMESTAMP = auto()
 178    CURRENT_USER = auto()
 179    DEFAULT = auto()
 180    DELETE = auto()
 181    DESC = auto()
 182    DESCRIBE = auto()
 183    DICTIONARY = auto()
 184    DISTINCT = auto()
 185    DIV = auto()
 186    DROP = auto()
 187    ELSE = auto()
 188    END = auto()
 189    ESCAPE = auto()
 190    EXCEPT = auto()
 191    EXECUTE = auto()
 192    EXISTS = auto()
 193    FALSE = auto()
 194    FETCH = auto()
 195    FILTER = auto()
 196    FINAL = auto()
 197    FIRST = auto()
 198    FOR = auto()
 199    FOREIGN_KEY = auto()
 200    FORMAT = auto()
 201    FROM = auto()
 202    FULL = auto()
 203    FUNCTION = auto()
 204    GLOB = auto()
 205    GLOBAL = auto()
 206    GROUP_BY = auto()
 207    GROUPING_SETS = auto()
 208    HAVING = auto()
 209    HINT = auto()
 210    IF = auto()
 211    ILIKE = auto()
 212    ILIKE_ANY = auto()
 213    IN = auto()
 214    INDEX = auto()
 215    INNER = auto()
 216    INSERT = auto()
 217    INTERSECT = auto()
 218    INTERVAL = auto()
 219    INTO = auto()
 220    INTRODUCER = auto()
 221    IRLIKE = auto()
 222    IS = auto()
 223    ISNULL = auto()
 224    JOIN = auto()
 225    JOIN_MARKER = auto()
 226    KEEP = auto()
 227    LANGUAGE = auto()
 228    LATERAL = auto()
 229    LEFT = auto()
 230    LIKE = auto()
 231    LIKE_ANY = auto()
 232    LIMIT = auto()
 233    LOAD = auto()
 234    LOCK = auto()
 235    MAP = auto()
 236    MATCH_RECOGNIZE = auto()
 237    MERGE = auto()
 238    MOD = auto()
 239    NATURAL = auto()
 240    NEXT = auto()
 241    NEXT_VALUE_FOR = auto()
 242    NOTNULL = auto()
 243    NULL = auto()
 244    OFFSET = auto()
 245    ON = auto()
 246    ORDER_BY = auto()
 247    ORDERED = auto()
 248    ORDINALITY = auto()
 249    OUTER = auto()
 250    OVER = auto()
 251    OVERLAPS = auto()
 252    OVERWRITE = auto()
 253    PARTITION = auto()
 254    PARTITION_BY = auto()
 255    PERCENT = auto()
 256    PIVOT = auto()
 257    PLACEHOLDER = auto()
 258    PRAGMA = auto()
 259    PRIMARY_KEY = auto()
 260    PROCEDURE = auto()
 261    PROPERTIES = auto()
 262    PSEUDO_TYPE = auto()
 263    QUALIFY = auto()
 264    QUOTE = auto()
 265    RANGE = auto()
 266    RECURSIVE = auto()
 267    REPLACE = auto()
 268    RETURNING = auto()
 269    REFERENCES = auto()
 270    RIGHT = auto()
 271    RLIKE = auto()
 272    ROLLBACK = auto()
 273    ROLLUP = auto()
 274    ROW = auto()
 275    ROWS = auto()
 276    SELECT = auto()
 277    SEMI = auto()
 278    SEPARATOR = auto()
 279    SERDE_PROPERTIES = auto()
 280    SET = auto()
 281    SETTINGS = auto()
 282    SHOW = auto()
 283    SIMILAR_TO = auto()
 284    SOME = auto()
 285    STRUCT = auto()
 286    TABLE_SAMPLE = auto()
 287    TEMPORARY = auto()
 288    TOP = auto()
 289    THEN = auto()
 290    TRUE = auto()
 291    UNCACHE = auto()
 292    UNION = auto()
 293    UNNEST = auto()
 294    UNPIVOT = auto()
 295    UPDATE = auto()
 296    USE = auto()
 297    USING = auto()
 298    VALUES = auto()
 299    VIEW = auto()
 300    VOLATILE = auto()
 301    WHEN = auto()
 302    WHERE = auto()
 303    WINDOW = auto()
 304    WITH = auto()
 305    UNIQUE = auto()
 306
 307
 308class Token:
 309    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
 310
 311    @classmethod
 312    def number(cls, number: int) -> Token:
 313        """Returns a NUMBER token with `number` as its text."""
 314        return cls(TokenType.NUMBER, str(number))
 315
 316    @classmethod
 317    def string(cls, string: str) -> Token:
 318        """Returns a STRING token with `string` as its text."""
 319        return cls(TokenType.STRING, string)
 320
 321    @classmethod
 322    def identifier(cls, identifier: str) -> Token:
 323        """Returns an IDENTIFIER token with `identifier` as its text."""
 324        return cls(TokenType.IDENTIFIER, identifier)
 325
 326    @classmethod
 327    def var(cls, var: str) -> Token:
 328        """Returns an VAR token with `var` as its text."""
 329        return cls(TokenType.VAR, var)
 330
 331    def __init__(
 332        self,
 333        token_type: TokenType,
 334        text: str,
 335        line: int = 1,
 336        col: int = 1,
 337        start: int = 0,
 338        end: int = 0,
 339        comments: t.Optional[t.List[str]] = None,
 340    ) -> None:
 341        """Token initializer.
 342
 343        Args:
 344            token_type: The TokenType Enum.
 345            text: The text of the token.
 346            line: The line that the token ends on.
 347            col: The column that the token ends on.
 348            start: The start index of the token.
 349            end: The ending index of the token.
 350            comments: The comments to attach to the token.
 351        """
 352        self.token_type = token_type
 353        self.text = text
 354        self.line = line
 355        self.col = col
 356        self.start = start
 357        self.end = end
 358        # Avoid a shared mutable default list across Token instances
 359        self.comments = comments if comments is not None else []
 359
 360    def __repr__(self) -> str:
 361        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 362        return f"<Token {attributes}>"
 363
 364
 365class _Tokenizer(type):
 366    def __new__(cls, clsname, bases, attrs):
 367        klass = super().__new__(cls, clsname, bases, attrs)
 368
 369        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 370            return dict(
 371                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
 372            )
 373
 374        def _quotes_to_format(
 375            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
 376        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
 377            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}
 378
 379        klass._QUOTES = _convert_quotes(klass.QUOTES)
 380        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)
 381
 382        klass._FORMAT_STRINGS = {
 383            **{
 384                p + s: (e, TokenType.NATIONAL_STRING)
 385                for s, e in klass._QUOTES.items()
 386                for p in ("n", "N")
 387            },
 388            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
 389            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
 390            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
 391            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
 392        }
 393
 394        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 395        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 396        klass._COMMENTS = {
 397            **dict(
 398                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 399                for comment in klass.COMMENTS
 400            ),
 401            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
 402        }
 403
 404        klass._KEYWORD_TRIE = new_trie(
 405            key.upper()
 406            for key in (
 407                *klass.KEYWORDS,
 408                *klass._COMMENTS,
 409                *klass._QUOTES,
 410                *klass._FORMAT_STRINGS,
 411            )
 412            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 413        )
 414
 415        return klass
 416
 417
 418class Tokenizer(metaclass=_Tokenizer):
 419    SINGLE_TOKENS = {
 420        "(": TokenType.L_PAREN,
 421        ")": TokenType.R_PAREN,
 422        "[": TokenType.L_BRACKET,
 423        "]": TokenType.R_BRACKET,
 424        "{": TokenType.L_BRACE,
 425        "}": TokenType.R_BRACE,
 426        "&": TokenType.AMP,
 427        "^": TokenType.CARET,
 428        ":": TokenType.COLON,
 429        ",": TokenType.COMMA,
 430        ".": TokenType.DOT,
 431        "-": TokenType.DASH,
 432        "=": TokenType.EQ,
 433        ">": TokenType.GT,
 434        "<": TokenType.LT,
 435        "%": TokenType.MOD,
 436        "!": TokenType.NOT,
 437        "|": TokenType.PIPE,
 438        "+": TokenType.PLUS,
 439        ";": TokenType.SEMICOLON,
 440        "/": TokenType.SLASH,
 441        "\\": TokenType.BACKSLASH,
 442        "*": TokenType.STAR,
 443        "~": TokenType.TILDA,
 444        "?": TokenType.PLACEHOLDER,
 445        "@": TokenType.PARAMETER,
 446        # Used only to break up a var like x'y' during scanning;
 447        # the token type assigned here doesn't matter
 448        "'": TokenType.QUOTE,
 449        "`": TokenType.IDENTIFIER,
 450        '"': TokenType.IDENTIFIER,
 451        "#": TokenType.HASH,
 452    }
 453
 454    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 455    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 456    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 457    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 458    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 459    IDENTIFIER_ESCAPES = ['"']
 460    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 461    STRING_ESCAPES = ["'"]
 462    VAR_SINGLE_TOKENS: t.Set[str] = set()
 463
 464    # Autofilled
 465    IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
 466
 467    _COMMENTS: t.Dict[str, str] = {}
 468    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 469    _IDENTIFIERS: t.Dict[str, str] = {}
 470    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 471    _QUOTES: t.Dict[str, str] = {}
 472    _STRING_ESCAPES: t.Set[str] = set()
 473    _KEYWORD_TRIE: t.Dict = {}
 474
 475    KEYWORDS: t.Dict[str, TokenType] = {
 476        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 477        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 478        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 479        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 480        "/*+": TokenType.HINT,
 481        "==": TokenType.EQ,
 482        "::": TokenType.DCOLON,
 483        "||": TokenType.DPIPE,
 484        ">=": TokenType.GTE,
 485        "<=": TokenType.LTE,
 486        "<>": TokenType.NEQ,
 487        "!=": TokenType.NEQ,
 488        "<=>": TokenType.NULLSAFE_EQ,
 489        "->": TokenType.ARROW,
 490        "->>": TokenType.DARROW,
 491        "=>": TokenType.FARROW,
 492        "#>": TokenType.HASH_ARROW,
 493        "#>>": TokenType.DHASH_ARROW,
 494        "<->": TokenType.LR_ARROW,
 495        "&&": TokenType.DAMP,
 496        "ALL": TokenType.ALL,
 497        "ALWAYS": TokenType.ALWAYS,
 498        "AND": TokenType.AND,
 499        "ANTI": TokenType.ANTI,
 500        "ANY": TokenType.ANY,
 501        "ASC": TokenType.ASC,
 502        "AS": TokenType.ALIAS,
 503        "ASOF": TokenType.ASOF,
 504        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 505        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 506        "BEGIN": TokenType.BEGIN,
 507        "BETWEEN": TokenType.BETWEEN,
 508        "CACHE": TokenType.CACHE,
 509        "UNCACHE": TokenType.UNCACHE,
 510        "CASE": TokenType.CASE,
 511        "CHARACTER SET": TokenType.CHARACTER_SET,
 512        "COLLATE": TokenType.COLLATE,
 513        "COLUMN": TokenType.COLUMN,
 514        "COMMIT": TokenType.COMMIT,
 515        "CONSTRAINT": TokenType.CONSTRAINT,
 516        "CREATE": TokenType.CREATE,
 517        "CROSS": TokenType.CROSS,
 518        "CUBE": TokenType.CUBE,
 519        "CURRENT_DATE": TokenType.CURRENT_DATE,
 520        "CURRENT_TIME": TokenType.CURRENT_TIME,
 521        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 522        "CURRENT_USER": TokenType.CURRENT_USER,
 523        "DATABASE": TokenType.DATABASE,
 524        "DEFAULT": TokenType.DEFAULT,
 525        "DELETE": TokenType.DELETE,
 526        "DESC": TokenType.DESC,
 527        "DESCRIBE": TokenType.DESCRIBE,
 528        "DISTINCT": TokenType.DISTINCT,
 529        "DIV": TokenType.DIV,
 530        "DROP": TokenType.DROP,
 531        "ELSE": TokenType.ELSE,
 532        "END": TokenType.END,
 533        "ESCAPE": TokenType.ESCAPE,
 534        "EXCEPT": TokenType.EXCEPT,
 535        "EXECUTE": TokenType.EXECUTE,
 536        "EXISTS": TokenType.EXISTS,
 537        "FALSE": TokenType.FALSE,
 538        "FETCH": TokenType.FETCH,
 539        "FILTER": TokenType.FILTER,
 540        "FIRST": TokenType.FIRST,
 541        "FULL": TokenType.FULL,
 542        "FUNCTION": TokenType.FUNCTION,
 543        "FOR": TokenType.FOR,
 544        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 545        "FORMAT": TokenType.FORMAT,
 546        "FROM": TokenType.FROM,
 547        "GEOGRAPHY": TokenType.GEOGRAPHY,
 548        "GEOMETRY": TokenType.GEOMETRY,
 549        "GLOB": TokenType.GLOB,
 550        "GROUP BY": TokenType.GROUP_BY,
 551        "GROUPING SETS": TokenType.GROUPING_SETS,
 552        "HAVING": TokenType.HAVING,
 553        "IF": TokenType.IF,
 554        "ILIKE": TokenType.ILIKE,
 555        "IN": TokenType.IN,
 556        "INDEX": TokenType.INDEX,
 557        "INET": TokenType.INET,
 558        "INNER": TokenType.INNER,
 559        "INSERT": TokenType.INSERT,
 560        "INTERVAL": TokenType.INTERVAL,
 561        "INTERSECT": TokenType.INTERSECT,
 562        "INTO": TokenType.INTO,
 563        "IS": TokenType.IS,
 564        "ISNULL": TokenType.ISNULL,
 565        "JOIN": TokenType.JOIN,
 566        "KEEP": TokenType.KEEP,
 567        "LATERAL": TokenType.LATERAL,
 568        "LEFT": TokenType.LEFT,
 569        "LIKE": TokenType.LIKE,
 570        "LIMIT": TokenType.LIMIT,
 571        "LOAD": TokenType.LOAD,
 572        "LOCK": TokenType.LOCK,
 573        "MERGE": TokenType.MERGE,
 574        "NATURAL": TokenType.NATURAL,
 575        "NEXT": TokenType.NEXT,
 576        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
 577        "NOT": TokenType.NOT,
 578        "NOTNULL": TokenType.NOTNULL,
 579        "NULL": TokenType.NULL,
 580        "OBJECT": TokenType.OBJECT,
 581        "OFFSET": TokenType.OFFSET,
 582        "ON": TokenType.ON,
 583        "OR": TokenType.OR,
 584        "ORDER BY": TokenType.ORDER_BY,
 585        "ORDINALITY": TokenType.ORDINALITY,
 586        "OUTER": TokenType.OUTER,
 587        "OVER": TokenType.OVER,
 588        "OVERLAPS": TokenType.OVERLAPS,
 589        "OVERWRITE": TokenType.OVERWRITE,
 590        "PARTITION": TokenType.PARTITION,
 591        "PARTITION BY": TokenType.PARTITION_BY,
 592        "PARTITIONED BY": TokenType.PARTITION_BY,
 593        "PARTITIONED_BY": TokenType.PARTITION_BY,
 594        "PERCENT": TokenType.PERCENT,
 595        "PIVOT": TokenType.PIVOT,
 596        "PRAGMA": TokenType.PRAGMA,
 597        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 598        "PROCEDURE": TokenType.PROCEDURE,
 599        "QUALIFY": TokenType.QUALIFY,
 600        "RANGE": TokenType.RANGE,
 601        "RECURSIVE": TokenType.RECURSIVE,
 602        "REGEXP": TokenType.RLIKE,
 603        "REPLACE": TokenType.REPLACE,
 604        "RETURNING": TokenType.RETURNING,
 605        "REFERENCES": TokenType.REFERENCES,
 606        "RIGHT": TokenType.RIGHT,
 607        "RLIKE": TokenType.RLIKE,
 608        "ROLLBACK": TokenType.ROLLBACK,
 609        "ROLLUP": TokenType.ROLLUP,
 610        "ROW": TokenType.ROW,
 611        "ROWS": TokenType.ROWS,
 612        "SCHEMA": TokenType.SCHEMA,
 613        "SELECT": TokenType.SELECT,
 614        "SEMI": TokenType.SEMI,
 615        "SET": TokenType.SET,
 616        "SETTINGS": TokenType.SETTINGS,
 617        "SHOW": TokenType.SHOW,
 618        "SIMILAR TO": TokenType.SIMILAR_TO,
 619        "SOME": TokenType.SOME,
 620        "TABLE": TokenType.TABLE,
 621        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 622        "TEMP": TokenType.TEMPORARY,
 623        "TEMPORARY": TokenType.TEMPORARY,
 624        "THEN": TokenType.THEN,
 625        "TRUE": TokenType.TRUE,
 626        "UNION": TokenType.UNION,
 627        "UNNEST": TokenType.UNNEST,
 628        "UNPIVOT": TokenType.UNPIVOT,
 629        "UPDATE": TokenType.UPDATE,
 630        "USE": TokenType.USE,
 631        "USING": TokenType.USING,
 632        "UUID": TokenType.UUID,
 633        "VALUES": TokenType.VALUES,
 634        "VIEW": TokenType.VIEW,
 635        "VOLATILE": TokenType.VOLATILE,
 636        "WHEN": TokenType.WHEN,
 637        "WHERE": TokenType.WHERE,
 638        "WINDOW": TokenType.WINDOW,
 639        "WITH": TokenType.WITH,
 640        "APPLY": TokenType.APPLY,
 641        "ARRAY": TokenType.ARRAY,
 642        "BIT": TokenType.BIT,
 643        "BOOL": TokenType.BOOLEAN,
 644        "BOOLEAN": TokenType.BOOLEAN,
 645        "BYTE": TokenType.TINYINT,
 646        "TINYINT": TokenType.TINYINT,
 647        "SHORT": TokenType.SMALLINT,
 648        "SMALLINT": TokenType.SMALLINT,
 649        "INT2": TokenType.SMALLINT,
 650        "INTEGER": TokenType.INT,
 651        "INT": TokenType.INT,
 652        "INT4": TokenType.INT,
 653        "LONG": TokenType.BIGINT,
 654        "BIGINT": TokenType.BIGINT,
 655        "INT8": TokenType.BIGINT,
 656        "DEC": TokenType.DECIMAL,
 657        "DECIMAL": TokenType.DECIMAL,
 658        "BIGDECIMAL": TokenType.BIGDECIMAL,
 659        "BIGNUMERIC": TokenType.BIGDECIMAL,
 660        "MAP": TokenType.MAP,
 661        "NULLABLE": TokenType.NULLABLE,
 662        "NUMBER": TokenType.DECIMAL,
 663        "NUMERIC": TokenType.DECIMAL,
 664        "FIXED": TokenType.DECIMAL,
 665        "REAL": TokenType.FLOAT,
 666        "FLOAT": TokenType.FLOAT,
 667        "FLOAT4": TokenType.FLOAT,
 668        "FLOAT8": TokenType.DOUBLE,
 669        "DOUBLE": TokenType.DOUBLE,
 670        "DOUBLE PRECISION": TokenType.DOUBLE,
 671        "JSON": TokenType.JSON,
 672        "CHAR": TokenType.CHAR,
 673        "CHARACTER": TokenType.CHAR,
 674        "NCHAR": TokenType.NCHAR,
 675        "VARCHAR": TokenType.VARCHAR,
 676        "VARCHAR2": TokenType.VARCHAR,
 677        "NVARCHAR": TokenType.NVARCHAR,
 678        "NVARCHAR2": TokenType.NVARCHAR,
 679        "STR": TokenType.TEXT,
 680        "STRING": TokenType.TEXT,
 681        "TEXT": TokenType.TEXT,
 682        "CLOB": TokenType.TEXT,
 683        "LONGVARCHAR": TokenType.TEXT,
 684        "BINARY": TokenType.BINARY,
 685        "BLOB": TokenType.VARBINARY,
 686        "BYTEA": TokenType.VARBINARY,
 687        "VARBINARY": TokenType.VARBINARY,
 688        "TIME": TokenType.TIME,
 689        "TIMESTAMP": TokenType.TIMESTAMP,
 690        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 691        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 692        "DATE": TokenType.DATE,
 693        "DATETIME": TokenType.DATETIME,
 694        "INT4RANGE": TokenType.INT4RANGE,
 695        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 696        "INT8RANGE": TokenType.INT8RANGE,
 697        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 698        "NUMRANGE": TokenType.NUMRANGE,
 699        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 700        "TSRANGE": TokenType.TSRANGE,
 701        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 702        "TSTZRANGE": TokenType.TSTZRANGE,
 703        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 704        "DATERANGE": TokenType.DATERANGE,
 705        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 706        "UNIQUE": TokenType.UNIQUE,
 707        "STRUCT": TokenType.STRUCT,
 708        "VARIANT": TokenType.VARIANT,
 709        "ALTER": TokenType.ALTER,
 710        "ANALYZE": TokenType.COMMAND,
 711        "CALL": TokenType.COMMAND,
 712        "COMMENT": TokenType.COMMENT,
 713        "COPY": TokenType.COMMAND,
 714        "EXPLAIN": TokenType.COMMAND,
 715        "GRANT": TokenType.COMMAND,
 716        "OPTIMIZE": TokenType.COMMAND,
 717        "PREPARE": TokenType.COMMAND,
 718        "TRUNCATE": TokenType.COMMAND,
 719        "VACUUM": TokenType.COMMAND,
 720    }
 721
 722    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 723        " ": TokenType.SPACE,
 724        "\t": TokenType.SPACE,
 725        "\n": TokenType.BREAK,
 726        "\r": TokenType.BREAK,
 727        "\r\n": TokenType.BREAK,
 728    }
 729
 730    COMMANDS = {
 731        TokenType.COMMAND,
 732        TokenType.EXECUTE,
 733        TokenType.FETCH,
 734        TokenType.SHOW,
 735    }
 736
 737    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 738
 739    # Handle numeric literals as in Hive (e.g. 3L = BIGINT)
 740    NUMERIC_LITERALS: t.Dict[str, str] = {}
 741    ENCODE: t.Optional[str] = None
 742
 743    COMMENTS = ["--", ("/*", "*/")]
 744
 745    __slots__ = (
 746        "sql",
 747        "size",
 748        "tokens",
 749        "_start",
 750        "_current",
 751        "_line",
 752        "_col",
 753        "_comments",
 754        "_char",
 755        "_end",
 756        "_peek",
 757        "_prev_token_line",
 758    )
 759
 760    def __init__(self) -> None:
 761        self.reset()
 762
 763    def reset(self) -> None:
 764        self.sql = ""
 765        self.size = 0
 766        self.tokens: t.List[Token] = []
 767        self._start = 0
 768        self._current = 0
 769        self._line = 1
 770        self._col = 0
 771        self._comments: t.List[str] = []
 772
 773        self._char = ""
 774        self._end = False
 775        self._peek = ""
 776        self._prev_token_line = -1
 777
 778    def tokenize(self, sql: str) -> t.List[Token]:
 779        """Returns a list of tokens corresponding to the SQL string `sql`."""
 780        self.reset()
 781        self.sql = sql
 782        self.size = len(sql)
 783
 784        try:
 785            self._scan()
 786        except Exception as e:
 787            start = max(self._current - 50, 0)
 788            end = min(self._current + 50, self.size - 1)
 789            context = self.sql[start:end]
 790            raise ValueError(f"Error tokenizing '{context}'") from e
 791
 792        return self.tokens
 793
 794    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 795        while self.size and not self._end:
 796            self._start = self._current
 797            self._advance()
 798
 799            if self._char is None:
 800                break
 801
 802            if self._char not in self.WHITE_SPACE:
 803                if self._char.isdigit():
 804                    self._scan_number()
 805                elif self._char in self._IDENTIFIERS:
 806                    self._scan_identifier(self._IDENTIFIERS[self._char])
 807                else:
 808                    self._scan_keywords()
 809
 810            if until and until():
 811                break
 812
 813        if self.tokens and self._comments:
 814            self.tokens[-1].comments.extend(self._comments)
 815
 816    def _chars(self, size: int) -> str:
 817        if size == 1:
 818            return self._char
 819
 820        start = self._current - 1
 821        end = start + size
 822
 823        return self.sql[start:end] if end <= self.size else ""
 824
 825    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 826        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 827            self._col = 1
 828            self._line += 1
 829        else:
 830            self._col += i
 831
 832        self._current += i
 833        self._end = self._current >= self.size
 834        self._char = self.sql[self._current - 1]
 835        self._peek = "" if self._end else self.sql[self._current]
 836
 837        if alnum and self._char.isalnum():
 838            # Here we use local variables instead of attributes for better performance
 839            _col = self._col
 840            _current = self._current
 841            _end = self._end
 842            _peek = self._peek
 843
 844            while _peek.isalnum():
 845                _col += 1
 846                _current += 1
 847                _end = _current >= self.size
 848                _peek = "" if _end else self.sql[_current]
 849
 850            self._col = _col
 851            self._current = _current
 852            self._end = _end
 853            self._peek = _peek
 854            self._char = self.sql[_current - 1]
 855
 856    @property
 857    def _text(self) -> str:
 858        return self.sql[self._start : self._current]
 859
 860    def peek(self, i: int = 0) -> str:
 861        i = self._current + i
 862        if i < self.size:
 863            return self.sql[i]
 864        return ""
 865
 866    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 867        self._prev_token_line = self._line
 868        self.tokens.append(
 869            Token(
 870                token_type,
 871                text=self._text if text is None else text,
 872                line=self._line,
 873                col=self._col,
 874                start=self._start,
 875                end=self._current - 1,
 876                comments=self._comments,
 877            )
 878        )
 879        self._comments = []
 880
 881        # If we have either a semicolon or a begin token before the command's token, we'll parse
 882        # whatever follows the command's token as a string
 883        if (
 884            token_type in self.COMMANDS
 885            and self._peek != ";"
 886            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 887        ):
 888            start = self._current
 889            tokens = len(self.tokens)
 890            self._scan(lambda: self._peek == ";")
 891            self.tokens = self.tokens[:tokens]
 892            text = self.sql[start : self._current].strip()
 893            if text:
 894                self._add(TokenType.STRING, text)
 895
 896    def _scan_keywords(self) -> None:
 897        size = 0
 898        word = None
 899        chars = self._text
 900        char = chars
 901        prev_space = False
 902        skip = False
 903        trie = self._KEYWORD_TRIE
 904        single_token = char in self.SINGLE_TOKENS
 905
 906        while chars:
 907            if skip:
 908                result = 1
 909            else:
 910                result, trie = in_trie(trie, char.upper())
 911
 912            if result == 0:
 913                break
 914            if result == 2:
 915                word = chars
 916
 917            size += 1
 918            end = self._current - 1 + size
 919
 920            if end < self.size:
 921                char = self.sql[end]
 922                single_token = single_token or char in self.SINGLE_TOKENS
 923                is_space = char in self.WHITE_SPACE
 924
 925                if not is_space or not prev_space:
 926                    if is_space:
 927                        char = " "
 928                    chars += char
 929                    prev_space = is_space
 930                    skip = False
 931                else:
 932                    skip = True
 933            else:
 934                char = ""
 935                chars = " "
 936
 937        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 938
 939        if not word:
 940            if self._char in self.SINGLE_TOKENS:
 941                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
 942                return
 943            self._scan_var()
 944            return
 945
 946        if self._scan_string(word):
 947            return
 948        if self._scan_comment(word):
 949            return
 950
 951        self._advance(size - 1)
 952        word = word.upper()
 953        self._add(self.KEYWORDS[word], text=word)
 954
 955    def _scan_comment(self, comment_start: str) -> bool:
 956        if comment_start not in self._COMMENTS:
 957            return False
 958
 959        comment_start_line = self._line
 960        comment_start_size = len(comment_start)
 961        comment_end = self._COMMENTS[comment_start]
 962
 963        if comment_end:
 964            # Skip the comment's start delimiter
 965            self._advance(comment_start_size)
 966
 967            comment_end_size = len(comment_end)
 968            while not self._end and self._chars(comment_end_size) != comment_end:
 969                self._advance(alnum=True)
 970
 971            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
 972            self._advance(comment_end_size - 1)
 973        else:
 974            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
 975                self._advance(alnum=True)
 976            self._comments.append(self._text[comment_start_size:])
 977
 978        # A leading comment is attached to the succeeding token, while a trailing one is attached to the preceding token.
 979        # Multiple consecutive comments are preserved by appending them to the current comments list.
 980        if comment_start_line == self._prev_token_line:
 981            self.tokens[-1].comments.extend(self._comments)
 982            self._comments = []
 983            self._prev_token_line = self._line
 984
 985        return True
 986
 987    def _scan_number(self) -> None:
 988        if self._char == "0":
 989            peek = self._peek.upper()
 990            if peek == "B":
 991                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
 992            elif peek == "X":
 993                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
 994
 995        decimal = False
 996        scientific = 0
 997
 998        while True:
 999            if self._peek.isdigit():
1000                self._advance()
1001            elif self._peek == "." and not decimal:
1002                after = self.peek(1)
1003                if after.isdigit() or not after.isalpha():
1004                    decimal = True
1005                    self._advance()
1006                else:
1007                    return self._add(TokenType.VAR)
1008            elif self._peek in ("-", "+") and scientific == 1:
1009                scientific += 1
1010                self._advance()
1011            elif self._peek.upper() == "E" and not scientific:
1012                scientific += 1
1013                self._advance()
1014            elif self._peek.isidentifier():
1015                number_text = self._text
1016                literal = ""
1017
1018                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1019                    literal += self._peek.upper()
1020                    self._advance()
1021
1022                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, ""))
1023
1024                if token_type:
1025                    self._add(TokenType.NUMBER, number_text)
1026                    self._add(TokenType.DCOLON, "::")
1027                    return self._add(token_type, literal)
1028                elif self.IDENTIFIERS_CAN_START_WITH_DIGIT:
1029                    return self._add(TokenType.VAR)
1030
1031                self._add(TokenType.NUMBER, number_text)
1032                return self._advance(-len(literal))
1033            else:
1034                return self._add(TokenType.NUMBER)
1035
1036    def _scan_bits(self) -> None:
1037        self._advance()
1038        value = self._extract_value()
1039        try:
1040            # If `value` can't be parsed as a binary integer, fall back to tokenizing it as an identifier
1041            int(value, 2)
1042            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1043        except ValueError:
1044            self._add(TokenType.IDENTIFIER)
1045
1046    def _scan_hex(self) -> None:
1047        self._advance()
1048        value = self._extract_value()
1049        try:
1050            # If `value` can't be parsed as a hexadecimal integer, fall back to tokenizing it as an identifier
1051            int(value, 16)
1052            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1053        except ValueError:
1054            self._add(TokenType.IDENTIFIER)
1055
1056    def _extract_value(self) -> str:
1057        while True:
1058            char = self._peek.strip()
1059            if char and char not in self.SINGLE_TOKENS:
1060                self._advance(alnum=True)
1061            else:
1062                break
1063
1064        return self._text
1065
1066    def _scan_string(self, start: str) -> bool:
1067        base = None
1068        token_type = TokenType.STRING
1069
1070        if start in self._QUOTES:
1071            end = self._QUOTES[start]
1072        elif start in self._FORMAT_STRINGS:
1073            end, token_type = self._FORMAT_STRINGS[start]
1074
1075            if token_type == TokenType.HEX_STRING:
1076                base = 16
1077            elif token_type == TokenType.BIT_STRING:
1078                base = 2
1079        else:
1080            return False
1081
1082        self._advance(len(start))
1083        text = self._extract_string(end)
1084
1085        if base:
1086            try:
1087                int(text, base)
1088            except ValueError:
1089                raise RuntimeError(
1090                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1091                )
1092        else:
1093            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1094
1095        self._add(token_type, text)
1096        return True
1097
1098    def _scan_identifier(self, identifier_end: str) -> None:
1099        self._advance()
1100        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1101        self._add(TokenType.IDENTIFIER, text)
1102
1103    def _scan_var(self) -> None:
1104        while True:
1105            char = self._peek.strip()
1106            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1107                self._advance(alnum=True)
1108            else:
1109                break
1110
1111        self._add(
1112            TokenType.VAR
1113            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1114            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1115        )
1116
1117    def _extract_string(self, delimiter: str, escapes=None) -> str:
1118        text = ""
1119        delim_size = len(delimiter)
1120        escapes = self._STRING_ESCAPES if escapes is None else escapes
1121
1122        while True:
1123            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
1124                if self._peek == delimiter:
1125                    text += self._peek
1126                else:
1127                    text += self._char + self._peek
1128
1129                if self._current + 1 < self.size:
1130                    self._advance(2)
1131                else:
1132                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1133            else:
1134                if self._chars(delim_size) == delimiter:
1135                    if delim_size > 1:
1136                        self._advance(delim_size - 1)
1137                    break
1138
1139                if self._end:
1140                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1141
1142                current = self._current - 1
1143                self._advance(alnum=True)
1144                text += self.sql[current : self._current - 1]
1145
1146        return text
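
The class attributes above are the extension surface: a dialect's tokenizer only declares QUOTES, IDENTIFIERS, HEX_STRINGS, KEYWORDS, and so on, and _Tokenizer.__new__ compiles them into the _QUOTES/_FORMAT_STRINGS dictionaries and the keyword trie used by _scan_keywords. A minimal sketch of such a subclass (MyTokenizer and the sample SQL are illustrative, not part of sqlglot):

    from sqlglot.tokens import Tokenizer, TokenType

    class MyTokenizer(Tokenizer):
        # Accept backtick-quoted identifiers alongside the default double quotes
        IDENTIFIERS = ['"', "`"]
        # Tokenize x'1F' as a HEX_STRING literal (x'...' is an assumed syntax)
        HEX_STRINGS = [("x'", "'")]

    # The metaclass has already derived _IDENTIFIERS, _FORMAT_STRINGS and the
    # keyword trie from the attributes above, so no further wiring is needed.
    tokens = MyTokenizer().tokenize("SELECT `col` FROM t WHERE h = x'1F'")
    assert any(tok.token_type == TokenType.HEX_STRING for tok in tokens)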
class TokenType(sqlglot.helper.AutoName):
 11class TokenType(AutoName):
 12    L_PAREN = auto()
 13    R_PAREN = auto()
 14    L_BRACKET = auto()
 15    R_BRACKET = auto()
 16    L_BRACE = auto()
 17    R_BRACE = auto()
 18    COMMA = auto()
 19    DOT = auto()
 20    DASH = auto()
 21    PLUS = auto()
 22    COLON = auto()
 23    DCOLON = auto()
 24    SEMICOLON = auto()
 25    STAR = auto()
 26    BACKSLASH = auto()
 27    SLASH = auto()
 28    LT = auto()
 29    LTE = auto()
 30    GT = auto()
 31    GTE = auto()
 32    NOT = auto()
 33    EQ = auto()
 34    NEQ = auto()
 35    NULLSAFE_EQ = auto()
 36    AND = auto()
 37    OR = auto()
 38    AMP = auto()
 39    DPIPE = auto()
 40    PIPE = auto()
 41    CARET = auto()
 42    TILDA = auto()
 43    ARROW = auto()
 44    DARROW = auto()
 45    FARROW = auto()
 46    HASH = auto()
 47    HASH_ARROW = auto()
 48    DHASH_ARROW = auto()
 49    LR_ARROW = auto()
 50    LT_AT = auto()
 51    AT_GT = auto()
 52    DOLLAR = auto()
 53    PARAMETER = auto()
 54    SESSION_PARAMETER = auto()
 55    DAMP = auto()
 56
 57    BLOCK_START = auto()
 58    BLOCK_END = auto()
 59
 60    SPACE = auto()
 61    BREAK = auto()
 62
 63    STRING = auto()
 64    NUMBER = auto()
 65    IDENTIFIER = auto()
 66    DATABASE = auto()
 67    COLUMN = auto()
 68    COLUMN_DEF = auto()
 69    SCHEMA = auto()
 70    TABLE = auto()
 71    VAR = auto()
 72    BIT_STRING = auto()
 73    HEX_STRING = auto()
 74    BYTE_STRING = auto()
 75    NATIONAL_STRING = auto()
 76    RAW_STRING = auto()
 77
 78    # types
 79    BIT = auto()
 80    BOOLEAN = auto()
 81    TINYINT = auto()
 82    UTINYINT = auto()
 83    SMALLINT = auto()
 84    USMALLINT = auto()
 85    INT = auto()
 86    UINT = auto()
 87    BIGINT = auto()
 88    UBIGINT = auto()
 89    INT128 = auto()
 90    UINT128 = auto()
 91    INT256 = auto()
 92    UINT256 = auto()
 93    FLOAT = auto()
 94    DOUBLE = auto()
 95    DECIMAL = auto()
 96    BIGDECIMAL = auto()
 97    CHAR = auto()
 98    NCHAR = auto()
 99    VARCHAR = auto()
100    NVARCHAR = auto()
101    TEXT = auto()
102    MEDIUMTEXT = auto()
103    LONGTEXT = auto()
104    MEDIUMBLOB = auto()
105    LONGBLOB = auto()
106    BINARY = auto()
107    VARBINARY = auto()
108    JSON = auto()
109    JSONB = auto()
110    TIME = auto()
111    TIMESTAMP = auto()
112    TIMESTAMPTZ = auto()
113    TIMESTAMPLTZ = auto()
114    DATETIME = auto()
115    DATETIME64 = auto()
116    DATE = auto()
117    INT4RANGE = auto()
118    INT4MULTIRANGE = auto()
119    INT8RANGE = auto()
120    INT8MULTIRANGE = auto()
121    NUMRANGE = auto()
122    NUMMULTIRANGE = auto()
123    TSRANGE = auto()
124    TSMULTIRANGE = auto()
125    TSTZRANGE = auto()
126    TSTZMULTIRANGE = auto()
127    DATERANGE = auto()
128    DATEMULTIRANGE = auto()
129    UUID = auto()
130    GEOGRAPHY = auto()
131    NULLABLE = auto()
132    GEOMETRY = auto()
133    HLLSKETCH = auto()
134    HSTORE = auto()
135    SUPER = auto()
136    SERIAL = auto()
137    SMALLSERIAL = auto()
138    BIGSERIAL = auto()
139    XML = auto()
140    UNIQUEIDENTIFIER = auto()
141    MONEY = auto()
142    SMALLMONEY = auto()
143    ROWVERSION = auto()
144    IMAGE = auto()
145    VARIANT = auto()
146    OBJECT = auto()
147    INET = auto()
148    ENUM = auto()
149
150    # keywords
151    ALIAS = auto()
152    ALTER = auto()
153    ALWAYS = auto()
154    ALL = auto()
155    ANTI = auto()
156    ANY = auto()
157    APPLY = auto()
158    ARRAY = auto()
159    ASC = auto()
160    ASOF = auto()
161    AUTO_INCREMENT = auto()
162    BEGIN = auto()
163    BETWEEN = auto()
164    CACHE = auto()
165    CASE = auto()
166    CHARACTER_SET = auto()
167    COLLATE = auto()
168    COMMAND = auto()
169    COMMENT = auto()
170    COMMIT = auto()
171    CONSTRAINT = auto()
172    CREATE = auto()
173    CROSS = auto()
174    CUBE = auto()
175    CURRENT_DATE = auto()
176    CURRENT_DATETIME = auto()
177    CURRENT_TIME = auto()
178    CURRENT_TIMESTAMP = auto()
179    CURRENT_USER = auto()
180    DEFAULT = auto()
181    DELETE = auto()
182    DESC = auto()
183    DESCRIBE = auto()
184    DICTIONARY = auto()
185    DISTINCT = auto()
186    DIV = auto()
187    DROP = auto()
188    ELSE = auto()
189    END = auto()
190    ESCAPE = auto()
191    EXCEPT = auto()
192    EXECUTE = auto()
193    EXISTS = auto()
194    FALSE = auto()
195    FETCH = auto()
196    FILTER = auto()
197    FINAL = auto()
198    FIRST = auto()
199    FOR = auto()
200    FOREIGN_KEY = auto()
201    FORMAT = auto()
202    FROM = auto()
203    FULL = auto()
204    FUNCTION = auto()
205    GLOB = auto()
206    GLOBAL = auto()
207    GROUP_BY = auto()
208    GROUPING_SETS = auto()
209    HAVING = auto()
210    HINT = auto()
211    IF = auto()
212    ILIKE = auto()
213    ILIKE_ANY = auto()
214    IN = auto()
215    INDEX = auto()
216    INNER = auto()
217    INSERT = auto()
218    INTERSECT = auto()
219    INTERVAL = auto()
220    INTO = auto()
221    INTRODUCER = auto()
222    IRLIKE = auto()
223    IS = auto()
224    ISNULL = auto()
225    JOIN = auto()
226    JOIN_MARKER = auto()
227    KEEP = auto()
228    LANGUAGE = auto()
229    LATERAL = auto()
230    LEFT = auto()
231    LIKE = auto()
232    LIKE_ANY = auto()
233    LIMIT = auto()
234    LOAD = auto()
235    LOCK = auto()
236    MAP = auto()
237    MATCH_RECOGNIZE = auto()
238    MERGE = auto()
239    MOD = auto()
240    NATURAL = auto()
241    NEXT = auto()
242    NEXT_VALUE_FOR = auto()
243    NOTNULL = auto()
244    NULL = auto()
245    OFFSET = auto()
246    ON = auto()
247    ORDER_BY = auto()
248    ORDERED = auto()
249    ORDINALITY = auto()
250    OUTER = auto()
251    OVER = auto()
252    OVERLAPS = auto()
253    OVERWRITE = auto()
254    PARTITION = auto()
255    PARTITION_BY = auto()
256    PERCENT = auto()
257    PIVOT = auto()
258    PLACEHOLDER = auto()
259    PRAGMA = auto()
260    PRIMARY_KEY = auto()
261    PROCEDURE = auto()
262    PROPERTIES = auto()
263    PSEUDO_TYPE = auto()
264    QUALIFY = auto()
265    QUOTE = auto()
266    RANGE = auto()
267    RECURSIVE = auto()
268    REPLACE = auto()
269    RETURNING = auto()
270    REFERENCES = auto()
271    RIGHT = auto()
272    RLIKE = auto()
273    ROLLBACK = auto()
274    ROLLUP = auto()
275    ROW = auto()
276    ROWS = auto()
277    SELECT = auto()
278    SEMI = auto()
279    SEPARATOR = auto()
280    SERDE_PROPERTIES = auto()
281    SET = auto()
282    SETTINGS = auto()
283    SHOW = auto()
284    SIMILAR_TO = auto()
285    SOME = auto()
286    STRUCT = auto()
287    TABLE_SAMPLE = auto()
288    TEMPORARY = auto()
289    TOP = auto()
290    THEN = auto()
291    TRUE = auto()
292    UNCACHE = auto()
293    UNION = auto()
294    UNNEST = auto()
295    UNPIVOT = auto()
296    UPDATE = auto()
297    USE = auto()
298    USING = auto()
299    VALUES = auto()
300    VIEW = auto()
301    VOLATILE = auto()
302    WHEN = auto()
303    WHERE = auto()
304    WINDOW = auto()
305    WITH = auto()
306    UNIQUE = auto()

An enumeration.

L_PAREN = <TokenType.L_PAREN: 'L_PAREN'>
R_PAREN = <TokenType.R_PAREN: 'R_PAREN'>
L_BRACKET = <TokenType.L_BRACKET: 'L_BRACKET'>
R_BRACKET = <TokenType.R_BRACKET: 'R_BRACKET'>
L_BRACE = <TokenType.L_BRACE: 'L_BRACE'>
R_BRACE = <TokenType.R_BRACE: 'R_BRACE'>
COMMA = <TokenType.COMMA: 'COMMA'>
DOT = <TokenType.DOT: 'DOT'>
DASH = <TokenType.DASH: 'DASH'>
PLUS = <TokenType.PLUS: 'PLUS'>
COLON = <TokenType.COLON: 'COLON'>
DCOLON = <TokenType.DCOLON: 'DCOLON'>
SEMICOLON = <TokenType.SEMICOLON: 'SEMICOLON'>
STAR = <TokenType.STAR: 'STAR'>
BACKSLASH = <TokenType.BACKSLASH: 'BACKSLASH'>
SLASH = <TokenType.SLASH: 'SLASH'>
LT = <TokenType.LT: 'LT'>
LTE = <TokenType.LTE: 'LTE'>
GT = <TokenType.GT: 'GT'>
GTE = <TokenType.GTE: 'GTE'>
NOT = <TokenType.NOT: 'NOT'>
EQ = <TokenType.EQ: 'EQ'>
NEQ = <TokenType.NEQ: 'NEQ'>
NULLSAFE_EQ = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>
AND = <TokenType.AND: 'AND'>
OR = <TokenType.OR: 'OR'>
AMP = <TokenType.AMP: 'AMP'>
DPIPE = <TokenType.DPIPE: 'DPIPE'>
PIPE = <TokenType.PIPE: 'PIPE'>
CARET = <TokenType.CARET: 'CARET'>
TILDA = <TokenType.TILDA: 'TILDA'>
ARROW = <TokenType.ARROW: 'ARROW'>
DARROW = <TokenType.DARROW: 'DARROW'>
FARROW = <TokenType.FARROW: 'FARROW'>
HASH = <TokenType.HASH: 'HASH'>
HASH_ARROW = <TokenType.HASH_ARROW: 'HASH_ARROW'>
DHASH_ARROW = <TokenType.DHASH_ARROW: 'DHASH_ARROW'>
LR_ARROW = <TokenType.LR_ARROW: 'LR_ARROW'>
LT_AT = <TokenType.LT_AT: 'LT_AT'>
AT_GT = <TokenType.AT_GT: 'AT_GT'>
DOLLAR = <TokenType.DOLLAR: 'DOLLAR'>
PARAMETER = <TokenType.PARAMETER: 'PARAMETER'>
SESSION_PARAMETER = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>
DAMP = <TokenType.DAMP: 'DAMP'>
BLOCK_START = <TokenType.BLOCK_START: 'BLOCK_START'>
BLOCK_END = <TokenType.BLOCK_END: 'BLOCK_END'>
SPACE = <TokenType.SPACE: 'SPACE'>
BREAK = <TokenType.BREAK: 'BREAK'>
STRING = <TokenType.STRING: 'STRING'>
NUMBER = <TokenType.NUMBER: 'NUMBER'>
IDENTIFIER = <TokenType.IDENTIFIER: 'IDENTIFIER'>
DATABASE = <TokenType.DATABASE: 'DATABASE'>
COLUMN = <TokenType.COLUMN: 'COLUMN'>
COLUMN_DEF = <TokenType.COLUMN_DEF: 'COLUMN_DEF'>
SCHEMA = <TokenType.SCHEMA: 'SCHEMA'>
TABLE = <TokenType.TABLE: 'TABLE'>
VAR = <TokenType.VAR: 'VAR'>
BIT_STRING = <TokenType.BIT_STRING: 'BIT_STRING'>
HEX_STRING = <TokenType.HEX_STRING: 'HEX_STRING'>
BYTE_STRING = <TokenType.BYTE_STRING: 'BYTE_STRING'>
NATIONAL_STRING = <TokenType.NATIONAL_STRING: 'NATIONAL_STRING'>
RAW_STRING = <TokenType.RAW_STRING: 'RAW_STRING'>
BIT = <TokenType.BIT: 'BIT'>
BOOLEAN = <TokenType.BOOLEAN: 'BOOLEAN'>
TINYINT = <TokenType.TINYINT: 'TINYINT'>
UTINYINT = <TokenType.UTINYINT: 'UTINYINT'>
SMALLINT = <TokenType.SMALLINT: 'SMALLINT'>
USMALLINT = <TokenType.USMALLINT: 'USMALLINT'>
INT = <TokenType.INT: 'INT'>
UINT = <TokenType.UINT: 'UINT'>
BIGINT = <TokenType.BIGINT: 'BIGINT'>
UBIGINT = <TokenType.UBIGINT: 'UBIGINT'>
INT128 = <TokenType.INT128: 'INT128'>
UINT128 = <TokenType.UINT128: 'UINT128'>
INT256 = <TokenType.INT256: 'INT256'>
UINT256 = <TokenType.UINT256: 'UINT256'>
FLOAT = <TokenType.FLOAT: 'FLOAT'>
DOUBLE = <TokenType.DOUBLE: 'DOUBLE'>
DECIMAL = <TokenType.DECIMAL: 'DECIMAL'>
BIGDECIMAL = <TokenType.BIGDECIMAL: 'BIGDECIMAL'>
CHAR = <TokenType.CHAR: 'CHAR'>
NCHAR = <TokenType.NCHAR: 'NCHAR'>
VARCHAR = <TokenType.VARCHAR: 'VARCHAR'>
NVARCHAR = <TokenType.NVARCHAR: 'NVARCHAR'>
TEXT = <TokenType.TEXT: 'TEXT'>
MEDIUMTEXT = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>
LONGTEXT = <TokenType.LONGTEXT: 'LONGTEXT'>
MEDIUMBLOB = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>
LONGBLOB = <TokenType.LONGBLOB: 'LONGBLOB'>
BINARY = <TokenType.BINARY: 'BINARY'>
VARBINARY = <TokenType.VARBINARY: 'VARBINARY'>
JSON = <TokenType.JSON: 'JSON'>
JSONB = <TokenType.JSONB: 'JSONB'>
TIME = <TokenType.TIME: 'TIME'>
TIMESTAMP = <TokenType.TIMESTAMP: 'TIMESTAMP'>
TIMESTAMPTZ = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>
TIMESTAMPLTZ = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>
DATETIME = <TokenType.DATETIME: 'DATETIME'>
DATETIME64 = <TokenType.DATETIME64: 'DATETIME64'>
DATE = <TokenType.DATE: 'DATE'>
INT4RANGE = <TokenType.INT4RANGE: 'INT4RANGE'>
INT4MULTIRANGE = <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>
INT8RANGE = <TokenType.INT8RANGE: 'INT8RANGE'>
INT8MULTIRANGE = <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>
NUMRANGE = <TokenType.NUMRANGE: 'NUMRANGE'>
NUMMULTIRANGE = <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>
TSRANGE = <TokenType.TSRANGE: 'TSRANGE'>
TSMULTIRANGE = <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>
TSTZRANGE = <TokenType.TSTZRANGE: 'TSTZRANGE'>
TSTZMULTIRANGE = <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>
DATERANGE = <TokenType.DATERANGE: 'DATERANGE'>
DATEMULTIRANGE = <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>
UUID = <TokenType.UUID: 'UUID'>
GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>
NULLABLE = <TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'>
HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE = <TokenType.HSTORE: 'HSTORE'>
SUPER = <TokenType.SUPER: 'SUPER'>
SERIAL = <TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'>
XML = <TokenType.XML: 'XML'>
UNIQUEIDENTIFIER = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>
MONEY = <TokenType.MONEY: 'MONEY'>
SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE = <TokenType.IMAGE: 'IMAGE'>
VARIANT = <TokenType.VARIANT: 'VARIANT'>
OBJECT = <TokenType.OBJECT: 'OBJECT'>
INET = <TokenType.INET: 'INET'>
ENUM = <TokenType.ENUM: 'ENUM'>
ALIAS = <TokenType.ALIAS: 'ALIAS'>
ALTER = <TokenType.ALTER: 'ALTER'>
ALWAYS = <TokenType.ALWAYS: 'ALWAYS'>
ALL = <TokenType.ALL: 'ALL'>
ANTI = <TokenType.ANTI: 'ANTI'>
ANY = <TokenType.ANY: 'ANY'>
APPLY = <TokenType.APPLY: 'APPLY'>
ARRAY = <TokenType.ARRAY: 'ARRAY'>
ASC = <TokenType.ASC: 'ASC'>
ASOF = <TokenType.ASOF: 'ASOF'>
AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN = <TokenType.BEGIN: 'BEGIN'>
BETWEEN = <TokenType.BETWEEN: 'BETWEEN'>
CACHE = <TokenType.CACHE: 'CACHE'>
CASE = <TokenType.CASE: 'CASE'>
CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>
COLLATE = <TokenType.COLLATE: 'COLLATE'>
COMMAND = <TokenType.COMMAND: 'COMMAND'>
COMMENT = <TokenType.COMMENT: 'COMMENT'>
COMMIT = <TokenType.COMMIT: 'COMMIT'>
CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'>
CREATE = <TokenType.CREATE: 'CREATE'>
CROSS = <TokenType.CROSS: 'CROSS'>
CUBE = <TokenType.CUBE: 'CUBE'>
CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
CURRENT_USER = <TokenType.CURRENT_USER: 'CURRENT_USER'>
DEFAULT = <TokenType.DEFAULT: 'DEFAULT'>
DELETE = <TokenType.DELETE: 'DELETE'>
DESC = <TokenType.DESC: 'DESC'>
DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'>
DICTIONARY = <TokenType.DICTIONARY: 'DICTIONARY'>
DISTINCT = <TokenType.DISTINCT: 'DISTINCT'>
DIV = <TokenType.DIV: 'DIV'>
DROP = <TokenType.DROP: 'DROP'>
ELSE = <TokenType.ELSE: 'ELSE'>
END = <TokenType.END: 'END'>
ESCAPE = <TokenType.ESCAPE: 'ESCAPE'>
EXCEPT = <TokenType.EXCEPT: 'EXCEPT'>
EXECUTE = <TokenType.EXECUTE: 'EXECUTE'>
EXISTS = <TokenType.EXISTS: 'EXISTS'>
FALSE = <TokenType.FALSE: 'FALSE'>
FETCH = <TokenType.FETCH: 'FETCH'>
FILTER = <TokenType.FILTER: 'FILTER'>
FINAL = <TokenType.FINAL: 'FINAL'>
FIRST = <TokenType.FIRST: 'FIRST'>
FOR = <TokenType.FOR: 'FOR'>
FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT = <TokenType.FORMAT: 'FORMAT'>
FROM = <TokenType.FROM: 'FROM'>
FULL = <TokenType.FULL: 'FULL'>
FUNCTION = <TokenType.FUNCTION: 'FUNCTION'>
GLOB = <TokenType.GLOB: 'GLOB'>
GLOBAL = <TokenType.GLOBAL: 'GLOBAL'>
GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING = <TokenType.HAVING: 'HAVING'>
HINT = <TokenType.HINT: 'HINT'>
IF = <TokenType.IF: 'IF'>
ILIKE = <TokenType.ILIKE: 'ILIKE'>
ILIKE_ANY = <TokenType.ILIKE_ANY: 'ILIKE_ANY'>
IN = <TokenType.IN: 'IN'>
INDEX = <TokenType.INDEX: 'INDEX'>
INNER = <TokenType.INNER: 'INNER'>
INSERT = <TokenType.INSERT: 'INSERT'>
INTERSECT = <TokenType.INTERSECT: 'INTERSECT'>
INTERVAL = <TokenType.INTERVAL: 'INTERVAL'>
INTO = <TokenType.INTO: 'INTO'>
INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE = <TokenType.IRLIKE: 'IRLIKE'>
IS = <TokenType.IS: 'IS'>
ISNULL = <TokenType.ISNULL: 'ISNULL'>
JOIN = <TokenType.JOIN: 'JOIN'>
JOIN_MARKER = <TokenType.JOIN_MARKER: 'JOIN_MARKER'>
KEEP = <TokenType.KEEP: 'KEEP'>
LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL = <TokenType.LATERAL: 'LATERAL'>
LEFT = <TokenType.LEFT: 'LEFT'>
LIKE = <TokenType.LIKE: 'LIKE'>
LIKE_ANY = <TokenType.LIKE_ANY: 'LIKE_ANY'>
LIMIT = <TokenType.LIMIT: 'LIMIT'>
LOAD = <TokenType.LOAD: 'LOAD'>
LOCK = <TokenType.LOCK: 'LOCK'>
MAP = <TokenType.MAP: 'MAP'>
MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MERGE = <TokenType.MERGE: 'MERGE'>
MOD = <TokenType.MOD: 'MOD'>
NATURAL = <TokenType.NATURAL: 'NATURAL'>
NEXT = <TokenType.NEXT: 'NEXT'>
NEXT_VALUE_FOR = <TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>
NOTNULL = <TokenType.NOTNULL: 'NOTNULL'>
NULL = <TokenType.NULL: 'NULL'>
OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUTER = <TokenType.OUTER: 'OUTER'>
OVER = <TokenType.OVER: 'OVER'>
OVERLAPS = <TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRAGMA = <TokenType.PRAGMA: 'PRAGMA'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SETTINGS = <TokenType.SETTINGS: 'SETTINGS'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRUE = <TokenType.TRUE: 'TRUE'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VIEW = <TokenType.VIEW: 'VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
Inherited Members
enum.Enum
name
value
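
Since TokenType is built on AutoName, each member's value mirrors its name, so token types round-trip cleanly through strings. A minimal sketch (plain enum behavior, no extra API assumed):

from sqlglot.tokens import TokenType

# AutoName assigns each member's name as its value.
assert TokenType.SELECT.name == "SELECT"
assert TokenType.SELECT.value == "SELECT"

# Members can therefore be looked up by value, e.g. when deserializing.
assert TokenType("UNION") is TokenType.UNION
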
class Token:
309class Token:
310    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
311
312    @classmethod
313    def number(cls, number: int) -> Token:
314        """Returns a NUMBER token with `number` as its text."""
315        return cls(TokenType.NUMBER, str(number))
316
317    @classmethod
318    def string(cls, string: str) -> Token:
319        """Returns a STRING token with `string` as its text."""
320        return cls(TokenType.STRING, string)
321
322    @classmethod
323    def identifier(cls, identifier: str) -> Token:
324        """Returns an IDENTIFIER token with `identifier` as its text."""
325        return cls(TokenType.IDENTIFIER, identifier)
326
327    @classmethod
328    def var(cls, var: str) -> Token:
329        """Returns a VAR token with `var` as its text."""
330        return cls(TokenType.VAR, var)
331
332    def __init__(
333        self,
334        token_type: TokenType,
335        text: str,
336        line: int = 1,
337        col: int = 1,
338        start: int = 0,
339        end: int = 0,
340        comments: t.List[str] = [],
341    ) -> None:
342        """Token initializer.
343
344        Args:
345            token_type: The TokenType Enum.
346            text: The text of the token.
347            line: The line that the token ends on.
348            col: The column that the token ends on.
349            start: The start index of the token.
350            end: The ending index of the token.
351            comments: The comments to attach to the token.
352        """
353        self.token_type = token_type
354        self.text = text
355        self.line = line
356        self.col = col
357        self.start = start
358        self.end = end
359        self.comments = comments
360
361    def __repr__(self) -> str:
362        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
363        return f"<Token {attributes}>"
Token(token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: List[str] = [])
332    def __init__(
333        self,
334        token_type: TokenType,
335        text: str,
336        line: int = 1,
337        col: int = 1,
338        start: int = 0,
339        end: int = 0,
340        comments: t.List[str] = [],
341    ) -> None:
342        """Token initializer.
343
344        Args:
345            token_type: The TokenType Enum.
346            text: The text of the token.
347            line: The line that the token ends on.
348            col: The column that the token ends on.
349            start: The start index of the token.
350            end: The ending index of the token.
351            comments: The comments to attach to the token.
352        """
353        self.token_type = token_type
354        self.text = text
355        self.line = line
356        self.col = col
357        self.start = start
358        self.end = end
359        self.comments = comments

Token initializer.

Arguments:
  • token_type: The TokenType Enum.
  • text: The text of the token.
  • line: The line that the token ends on.
  • col: The column that the token ends on.
  • start: The start index of the token.
  • end: The ending index of the token.
  • comments: The comments to attach to the token.
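
A minimal sketch of constructing a Token directly; the position values below are illustrative, not ones computed by a tokenizer:

from sqlglot.tokens import Token, TokenType

# Pass comments explicitly: the default [] is a single shared mutable list.
tok = Token(TokenType.SELECT, "SELECT", line=1, col=6, start=0, end=5, comments=[])
print(tok)
# <Token token_type: TokenType.SELECT, text: SELECT, line: 1, col: 6, start: 0, end: 5, comments: []>
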
@classmethod
def number(cls, number: int) -> sqlglot.tokens.Token:
312    @classmethod
313    def number(cls, number: int) -> Token:
314        """Returns a NUMBER token with `number` as its text."""
315        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with `number` as its text.

@classmethod
def string(cls, string: str) -> sqlglot.tokens.Token:
317    @classmethod
318    def string(cls, string: str) -> Token:
319        """Returns a STRING token with `string` as its text."""
320        return cls(TokenType.STRING, string)

Returns a STRING token with `string` as its text.

@classmethod
def identifier(cls, identifier: str) -> sqlglot.tokens.Token:
322    @classmethod
323    def identifier(cls, identifier: str) -> Token:
324        """Returns an IDENTIFIER token with `identifier` as its text."""
325        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with `identifier` as its text.

@classmethod
def var(cls, var: str) -> sqlglot.tokens.Token:
327    @classmethod
328    def var(cls, var: str) -> Token:
329        """Returns a VAR token with `var` as its text."""
330        return cls(TokenType.VAR, var)

Returns a VAR token with `var` as its text.

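These four classmethods are thin conveniences over the initializer; a quick usage sketch:

from sqlglot.tokens import Token, TokenType

assert Token.number(42).token_type is TokenType.NUMBER
assert Token.number(42).text == "42"
assert Token.string("hello").token_type is TokenType.STRING
assert Token.identifier("my_table").token_type is TokenType.IDENTIFIER
assert Token.var("x").token_type is TokenType.VAR
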
class Tokenizer:
 419class Tokenizer(metaclass=_Tokenizer):
 420    SINGLE_TOKENS = {
 421        "(": TokenType.L_PAREN,
 422        ")": TokenType.R_PAREN,
 423        "[": TokenType.L_BRACKET,
 424        "]": TokenType.R_BRACKET,
 425        "{": TokenType.L_BRACE,
 426        "}": TokenType.R_BRACE,
 427        "&": TokenType.AMP,
 428        "^": TokenType.CARET,
 429        ":": TokenType.COLON,
 430        ",": TokenType.COMMA,
 431        ".": TokenType.DOT,
 432        "-": TokenType.DASH,
 433        "=": TokenType.EQ,
 434        ">": TokenType.GT,
 435        "<": TokenType.LT,
 436        "%": TokenType.MOD,
 437        "!": TokenType.NOT,
 438        "|": TokenType.PIPE,
 439        "+": TokenType.PLUS,
 440        ";": TokenType.SEMICOLON,
 441        "/": TokenType.SLASH,
 442        "\\": TokenType.BACKSLASH,
 443        "*": TokenType.STAR,
 444        "~": TokenType.TILDA,
 445        "?": TokenType.PLACEHOLDER,
 446        "@": TokenType.PARAMETER,
 447        # used for breaking a var like x'y' but nothing else
 448        # the token type doesn't matter
 449        "'": TokenType.QUOTE,
 450        "`": TokenType.IDENTIFIER,
 451        '"': TokenType.IDENTIFIER,
 452        "#": TokenType.HASH,
 453    }
 454
 455    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 456    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 457    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 458    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 459    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 460    IDENTIFIER_ESCAPES = ['"']
 461    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 462    STRING_ESCAPES = ["'"]
 463    VAR_SINGLE_TOKENS: t.Set[str] = set()
 464
 465    # Autofilled
 466    IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False
 467
 468    _COMMENTS: t.Dict[str, str] = {}
 469    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 470    _IDENTIFIERS: t.Dict[str, str] = {}
 471    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 472    _QUOTES: t.Dict[str, str] = {}
 473    _STRING_ESCAPES: t.Set[str] = set()
 474    _KEYWORD_TRIE: t.Dict = {}
 475
 476    KEYWORDS: t.Dict[str, TokenType] = {
 477        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 478        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 479        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 480        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 481        "/*+": TokenType.HINT,
 482        "==": TokenType.EQ,
 483        "::": TokenType.DCOLON,
 484        "||": TokenType.DPIPE,
 485        ">=": TokenType.GTE,
 486        "<=": TokenType.LTE,
 487        "<>": TokenType.NEQ,
 488        "!=": TokenType.NEQ,
 489        "<=>": TokenType.NULLSAFE_EQ,
 490        "->": TokenType.ARROW,
 491        "->>": TokenType.DARROW,
 492        "=>": TokenType.FARROW,
 493        "#>": TokenType.HASH_ARROW,
 494        "#>>": TokenType.DHASH_ARROW,
 495        "<->": TokenType.LR_ARROW,
 496        "&&": TokenType.DAMP,
 497        "ALL": TokenType.ALL,
 498        "ALWAYS": TokenType.ALWAYS,
 499        "AND": TokenType.AND,
 500        "ANTI": TokenType.ANTI,
 501        "ANY": TokenType.ANY,
 502        "ASC": TokenType.ASC,
 503        "AS": TokenType.ALIAS,
 504        "ASOF": TokenType.ASOF,
 505        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 506        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 507        "BEGIN": TokenType.BEGIN,
 508        "BETWEEN": TokenType.BETWEEN,
 509        "CACHE": TokenType.CACHE,
 510        "UNCACHE": TokenType.UNCACHE,
 511        "CASE": TokenType.CASE,
 512        "CHARACTER SET": TokenType.CHARACTER_SET,
 513        "COLLATE": TokenType.COLLATE,
 514        "COLUMN": TokenType.COLUMN,
 515        "COMMIT": TokenType.COMMIT,
 516        "CONSTRAINT": TokenType.CONSTRAINT,
 517        "CREATE": TokenType.CREATE,
 518        "CROSS": TokenType.CROSS,
 519        "CUBE": TokenType.CUBE,
 520        "CURRENT_DATE": TokenType.CURRENT_DATE,
 521        "CURRENT_TIME": TokenType.CURRENT_TIME,
 522        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 523        "CURRENT_USER": TokenType.CURRENT_USER,
 524        "DATABASE": TokenType.DATABASE,
 525        "DEFAULT": TokenType.DEFAULT,
 526        "DELETE": TokenType.DELETE,
 527        "DESC": TokenType.DESC,
 528        "DESCRIBE": TokenType.DESCRIBE,
 529        "DISTINCT": TokenType.DISTINCT,
 530        "DIV": TokenType.DIV,
 531        "DROP": TokenType.DROP,
 532        "ELSE": TokenType.ELSE,
 533        "END": TokenType.END,
 534        "ESCAPE": TokenType.ESCAPE,
 535        "EXCEPT": TokenType.EXCEPT,
 536        "EXECUTE": TokenType.EXECUTE,
 537        "EXISTS": TokenType.EXISTS,
 538        "FALSE": TokenType.FALSE,
 539        "FETCH": TokenType.FETCH,
 540        "FILTER": TokenType.FILTER,
 541        "FIRST": TokenType.FIRST,
 542        "FULL": TokenType.FULL,
 543        "FUNCTION": TokenType.FUNCTION,
 544        "FOR": TokenType.FOR,
 545        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 546        "FORMAT": TokenType.FORMAT,
 547        "FROM": TokenType.FROM,
 548        "GEOGRAPHY": TokenType.GEOGRAPHY,
 549        "GEOMETRY": TokenType.GEOMETRY,
 550        "GLOB": TokenType.GLOB,
 551        "GROUP BY": TokenType.GROUP_BY,
 552        "GROUPING SETS": TokenType.GROUPING_SETS,
 553        "HAVING": TokenType.HAVING,
 554        "IF": TokenType.IF,
 555        "ILIKE": TokenType.ILIKE,
 556        "IN": TokenType.IN,
 557        "INDEX": TokenType.INDEX,
 558        "INET": TokenType.INET,
 559        "INNER": TokenType.INNER,
 560        "INSERT": TokenType.INSERT,
 561        "INTERVAL": TokenType.INTERVAL,
 562        "INTERSECT": TokenType.INTERSECT,
 563        "INTO": TokenType.INTO,
 564        "IS": TokenType.IS,
 565        "ISNULL": TokenType.ISNULL,
 566        "JOIN": TokenType.JOIN,
 567        "KEEP": TokenType.KEEP,
 568        "LATERAL": TokenType.LATERAL,
 569        "LEFT": TokenType.LEFT,
 570        "LIKE": TokenType.LIKE,
 571        "LIMIT": TokenType.LIMIT,
 572        "LOAD": TokenType.LOAD,
 573        "LOCK": TokenType.LOCK,
 574        "MERGE": TokenType.MERGE,
 575        "NATURAL": TokenType.NATURAL,
 576        "NEXT": TokenType.NEXT,
 577        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
 578        "NOT": TokenType.NOT,
 579        "NOTNULL": TokenType.NOTNULL,
 580        "NULL": TokenType.NULL,
 581        "OBJECT": TokenType.OBJECT,
 582        "OFFSET": TokenType.OFFSET,
 583        "ON": TokenType.ON,
 584        "OR": TokenType.OR,
 585        "ORDER BY": TokenType.ORDER_BY,
 586        "ORDINALITY": TokenType.ORDINALITY,
 587        "OUTER": TokenType.OUTER,
 588        "OVER": TokenType.OVER,
 589        "OVERLAPS": TokenType.OVERLAPS,
 590        "OVERWRITE": TokenType.OVERWRITE,
 591        "PARTITION": TokenType.PARTITION,
 592        "PARTITION BY": TokenType.PARTITION_BY,
 593        "PARTITIONED BY": TokenType.PARTITION_BY,
 594        "PARTITIONED_BY": TokenType.PARTITION_BY,
 595        "PERCENT": TokenType.PERCENT,
 596        "PIVOT": TokenType.PIVOT,
 597        "PRAGMA": TokenType.PRAGMA,
 598        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 599        "PROCEDURE": TokenType.PROCEDURE,
 600        "QUALIFY": TokenType.QUALIFY,
 601        "RANGE": TokenType.RANGE,
 602        "RECURSIVE": TokenType.RECURSIVE,
 603        "REGEXP": TokenType.RLIKE,
 604        "REPLACE": TokenType.REPLACE,
 605        "RETURNING": TokenType.RETURNING,
 606        "REFERENCES": TokenType.REFERENCES,
 607        "RIGHT": TokenType.RIGHT,
 608        "RLIKE": TokenType.RLIKE,
 609        "ROLLBACK": TokenType.ROLLBACK,
 610        "ROLLUP": TokenType.ROLLUP,
 611        "ROW": TokenType.ROW,
 612        "ROWS": TokenType.ROWS,
 613        "SCHEMA": TokenType.SCHEMA,
 614        "SELECT": TokenType.SELECT,
 615        "SEMI": TokenType.SEMI,
 616        "SET": TokenType.SET,
 617        "SETTINGS": TokenType.SETTINGS,
 618        "SHOW": TokenType.SHOW,
 619        "SIMILAR TO": TokenType.SIMILAR_TO,
 620        "SOME": TokenType.SOME,
 621        "TABLE": TokenType.TABLE,
 622        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 623        "TEMP": TokenType.TEMPORARY,
 624        "TEMPORARY": TokenType.TEMPORARY,
 625        "THEN": TokenType.THEN,
 626        "TRUE": TokenType.TRUE,
 627        "UNION": TokenType.UNION,
 628        "UNNEST": TokenType.UNNEST,
 629        "UNPIVOT": TokenType.UNPIVOT,
 630        "UPDATE": TokenType.UPDATE,
 631        "USE": TokenType.USE,
 632        "USING": TokenType.USING,
 633        "UUID": TokenType.UUID,
 634        "VALUES": TokenType.VALUES,
 635        "VIEW": TokenType.VIEW,
 636        "VOLATILE": TokenType.VOLATILE,
 637        "WHEN": TokenType.WHEN,
 638        "WHERE": TokenType.WHERE,
 639        "WINDOW": TokenType.WINDOW,
 640        "WITH": TokenType.WITH,
 641        "APPLY": TokenType.APPLY,
 642        "ARRAY": TokenType.ARRAY,
 643        "BIT": TokenType.BIT,
 644        "BOOL": TokenType.BOOLEAN,
 645        "BOOLEAN": TokenType.BOOLEAN,
 646        "BYTE": TokenType.TINYINT,
 647        "TINYINT": TokenType.TINYINT,
 648        "SHORT": TokenType.SMALLINT,
 649        "SMALLINT": TokenType.SMALLINT,
 650        "INT2": TokenType.SMALLINT,
 651        "INTEGER": TokenType.INT,
 652        "INT": TokenType.INT,
 653        "INT4": TokenType.INT,
 654        "LONG": TokenType.BIGINT,
 655        "BIGINT": TokenType.BIGINT,
 656        "INT8": TokenType.BIGINT,
 657        "DEC": TokenType.DECIMAL,
 658        "DECIMAL": TokenType.DECIMAL,
 659        "BIGDECIMAL": TokenType.BIGDECIMAL,
 660        "BIGNUMERIC": TokenType.BIGDECIMAL,
 661        "MAP": TokenType.MAP,
 662        "NULLABLE": TokenType.NULLABLE,
 663        "NUMBER": TokenType.DECIMAL,
 664        "NUMERIC": TokenType.DECIMAL,
 665        "FIXED": TokenType.DECIMAL,
 666        "REAL": TokenType.FLOAT,
 667        "FLOAT": TokenType.FLOAT,
 668        "FLOAT4": TokenType.FLOAT,
 669        "FLOAT8": TokenType.DOUBLE,
 670        "DOUBLE": TokenType.DOUBLE,
 671        "DOUBLE PRECISION": TokenType.DOUBLE,
 672        "JSON": TokenType.JSON,
 673        "CHAR": TokenType.CHAR,
 674        "CHARACTER": TokenType.CHAR,
 675        "NCHAR": TokenType.NCHAR,
 676        "VARCHAR": TokenType.VARCHAR,
 677        "VARCHAR2": TokenType.VARCHAR,
 678        "NVARCHAR": TokenType.NVARCHAR,
 679        "NVARCHAR2": TokenType.NVARCHAR,
 680        "STR": TokenType.TEXT,
 681        "STRING": TokenType.TEXT,
 682        "TEXT": TokenType.TEXT,
 683        "CLOB": TokenType.TEXT,
 684        "LONGVARCHAR": TokenType.TEXT,
 685        "BINARY": TokenType.BINARY,
 686        "BLOB": TokenType.VARBINARY,
 687        "BYTEA": TokenType.VARBINARY,
 688        "VARBINARY": TokenType.VARBINARY,
 689        "TIME": TokenType.TIME,
 690        "TIMESTAMP": TokenType.TIMESTAMP,
 691        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 692        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 693        "DATE": TokenType.DATE,
 694        "DATETIME": TokenType.DATETIME,
 695        "INT4RANGE": TokenType.INT4RANGE,
 696        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 697        "INT8RANGE": TokenType.INT8RANGE,
 698        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 699        "NUMRANGE": TokenType.NUMRANGE,
 700        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 701        "TSRANGE": TokenType.TSRANGE,
 702        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 703        "TSTZRANGE": TokenType.TSTZRANGE,
 704        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 705        "DATERANGE": TokenType.DATERANGE,
 706        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 707        "UNIQUE": TokenType.UNIQUE,
 708        "STRUCT": TokenType.STRUCT,
 709        "VARIANT": TokenType.VARIANT,
 710        "ALTER": TokenType.ALTER,
 711        "ANALYZE": TokenType.COMMAND,
 712        "CALL": TokenType.COMMAND,
 713        "COMMENT": TokenType.COMMENT,
 714        "COPY": TokenType.COMMAND,
 715        "EXPLAIN": TokenType.COMMAND,
 716        "GRANT": TokenType.COMMAND,
 717        "OPTIMIZE": TokenType.COMMAND,
 718        "PREPARE": TokenType.COMMAND,
 719        "TRUNCATE": TokenType.COMMAND,
 720        "VACUUM": TokenType.COMMAND,
 721    }
 722
 723    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 724        " ": TokenType.SPACE,
 725        "\t": TokenType.SPACE,
 726        "\n": TokenType.BREAK,
 727        "\r": TokenType.BREAK,
 728        "\r\n": TokenType.BREAK,
 729    }
 730
 731    COMMANDS = {
 732        TokenType.COMMAND,
 733        TokenType.EXECUTE,
 734        TokenType.FETCH,
 735        TokenType.SHOW,
 736    }
 737
 738    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 739
 740    # handle numeric literals like in hive (3L = BIGINT)
 741    NUMERIC_LITERALS: t.Dict[str, str] = {}
 742    ENCODE: t.Optional[str] = None
 743
 744    COMMENTS = ["--", ("/*", "*/")]
 745
 746    __slots__ = (
 747        "sql",
 748        "size",
 749        "tokens",
 750        "_start",
 751        "_current",
 752        "_line",
 753        "_col",
 754        "_comments",
 755        "_char",
 756        "_end",
 757        "_peek",
 758        "_prev_token_line",
 759    )
 760
 761    def __init__(self) -> None:
 762        self.reset()
 763
 764    def reset(self) -> None:
 765        self.sql = ""
 766        self.size = 0
 767        self.tokens: t.List[Token] = []
 768        self._start = 0
 769        self._current = 0
 770        self._line = 1
 771        self._col = 0
 772        self._comments: t.List[str] = []
 773
 774        self._char = ""
 775        self._end = False
 776        self._peek = ""
 777        self._prev_token_line = -1
 778
 779    def tokenize(self, sql: str) -> t.List[Token]:
 780        """Returns a list of tokens corresponding to the SQL string `sql`."""
 781        self.reset()
 782        self.sql = sql
 783        self.size = len(sql)
 784
 785        try:
 786            self._scan()
 787        except Exception as e:
 788            start = max(self._current - 50, 0)
 789            end = min(self._current + 50, self.size - 1)
 790            context = self.sql[start:end]
 791            raise ValueError(f"Error tokenizing '{context}'") from e
 792
 793        return self.tokens
 794
 795    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 796        while self.size and not self._end:
 797            self._start = self._current
 798            self._advance()
 799
 800            if self._char is None:
 801                break
 802
 803            if self._char not in self.WHITE_SPACE:
 804                if self._char.isdigit():
 805                    self._scan_number()
 806                elif self._char in self._IDENTIFIERS:
 807                    self._scan_identifier(self._IDENTIFIERS[self._char])
 808                else:
 809                    self._scan_keywords()
 810
 811            if until and until():
 812                break
 813
 814        if self.tokens and self._comments:
 815            self.tokens[-1].comments.extend(self._comments)
 816
 817    def _chars(self, size: int) -> str:
 818        if size == 1:
 819            return self._char
 820
 821        start = self._current - 1
 822        end = start + size
 823
 824        return self.sql[start:end] if end <= self.size else ""
 825
 826    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 827        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 828            self._col = 1
 829            self._line += 1
 830        else:
 831            self._col += i
 832
 833        self._current += i
 834        self._end = self._current >= self.size
 835        self._char = self.sql[self._current - 1]
 836        self._peek = "" if self._end else self.sql[self._current]
 837
 838        if alnum and self._char.isalnum():
 839            # Here we use local variables instead of attributes for better performance
 840            _col = self._col
 841            _current = self._current
 842            _end = self._end
 843            _peek = self._peek
 844
 845            while _peek.isalnum():
 846                _col += 1
 847                _current += 1
 848                _end = _current >= self.size
 849                _peek = "" if _end else self.sql[_current]
 850
 851            self._col = _col
 852            self._current = _current
 853            self._end = _end
 854            self._peek = _peek
 855            self._char = self.sql[_current - 1]
 856
 857    @property
 858    def _text(self) -> str:
 859        return self.sql[self._start : self._current]
 860
 861    def peek(self, i: int = 0) -> str:
 862        i = self._current + i
 863        if i < self.size:
 864            return self.sql[i]
 865        return ""
 866
 867    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 868        self._prev_token_line = self._line
 869        self.tokens.append(
 870            Token(
 871                token_type,
 872                text=self._text if text is None else text,
 873                line=self._line,
 874                col=self._col,
 875                start=self._start,
 876                end=self._current - 1,
 877                comments=self._comments,
 878            )
 879        )
 880        self._comments = []
 881
 882        # If we have either a semicolon or a begin token before the command's token, we'll parse
 883        # whatever follows the command's token as a string
 884        if (
 885            token_type in self.COMMANDS
 886            and self._peek != ";"
 887            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 888        ):
 889            start = self._current
 890            tokens = len(self.tokens)
 891            self._scan(lambda: self._peek == ";")
 892            self.tokens = self.tokens[:tokens]
 893            text = self.sql[start : self._current].strip()
 894            if text:
 895                self._add(TokenType.STRING, text)
 896
 897    def _scan_keywords(self) -> None:
 898        size = 0
 899        word = None
 900        chars = self._text
 901        char = chars
 902        prev_space = False
 903        skip = False
 904        trie = self._KEYWORD_TRIE
 905        single_token = char in self.SINGLE_TOKENS
 906
 907        while chars:
 908            if skip:
 909                result = 1
 910            else:
 911                result, trie = in_trie(trie, char.upper())
 912
 913            if result == 0:
 914                break
 915            if result == 2:
 916                word = chars
 917
 918            size += 1
 919            end = self._current - 1 + size
 920
 921            if end < self.size:
 922                char = self.sql[end]
 923                single_token = single_token or char in self.SINGLE_TOKENS
 924                is_space = char in self.WHITE_SPACE
 925
 926                if not is_space or not prev_space:
 927                    if is_space:
 928                        char = " "
 929                    chars += char
 930                    prev_space = is_space
 931                    skip = False
 932                else:
 933                    skip = True
 934            else:
 935                char = ""
 936                chars = " "
 937
 938        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 939
 940        if not word:
 941            if self._char in self.SINGLE_TOKENS:
 942                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
 943                return
 944            self._scan_var()
 945            return
 946
 947        if self._scan_string(word):
 948            return
 949        if self._scan_comment(word):
 950            return
 951
 952        self._advance(size - 1)
 953        word = word.upper()
 954        self._add(self.KEYWORDS[word], text=word)
 955
 956    def _scan_comment(self, comment_start: str) -> bool:
 957        if comment_start not in self._COMMENTS:
 958            return False
 959
 960        comment_start_line = self._line
 961        comment_start_size = len(comment_start)
 962        comment_end = self._COMMENTS[comment_start]
 963
 964        if comment_end:
 965            # Skip the comment's start delimiter
 966            self._advance(comment_start_size)
 967
 968            comment_end_size = len(comment_end)
 969            while not self._end and self._chars(comment_end_size) != comment_end:
 970                self._advance(alnum=True)
 971
 972            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
 973            self._advance(comment_end_size - 1)
 974        else:
 975            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
 976                self._advance(alnum=True)
 977            self._comments.append(self._text[comment_start_size:])
 978
 979        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
 980        # Multiple consecutive comments are preserved by appending them to the current comments list.
 981        if comment_start_line == self._prev_token_line:
 982            self.tokens[-1].comments.extend(self._comments)
 983            self._comments = []
 984            self._prev_token_line = self._line
 985
 986        return True
 987
 988    def _scan_number(self) -> None:
 989        if self._char == "0":
 990            peek = self._peek.upper()
 991            if peek == "B":
 992                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
 993            elif peek == "X":
 994                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
 995
 996        decimal = False
 997        scientific = 0
 998
 999        while True:
1000            if self._peek.isdigit():
1001                self._advance()
1002            elif self._peek == "." and not decimal:
1003                after = self.peek(1)
1004                if after.isdigit() or not after.isalpha():
1005                    decimal = True
1006                    self._advance()
1007                else:
1008                    return self._add(TokenType.VAR)
1009            elif self._peek in ("-", "+") and scientific == 1:
1010                scientific += 1
1011                self._advance()
1012            elif self._peek.upper() == "E" and not scientific:
1013                scientific += 1
1014                self._advance()
1015            elif self._peek.isidentifier():
1016                number_text = self._text
1017                literal = ""
1018
1019                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1020                    literal += self._peek.upper()
1021                    self._advance()
1022
1023                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, ""))
1024
1025                if token_type:
1026                    self._add(TokenType.NUMBER, number_text)
1027                    self._add(TokenType.DCOLON, "::")
1028                    return self._add(token_type, literal)
1029                elif self.IDENTIFIERS_CAN_START_WITH_DIGIT:
1030                    return self._add(TokenType.VAR)
1031
1032                self._add(TokenType.NUMBER, number_text)
1033                return self._advance(-len(literal))
1034            else:
1035                return self._add(TokenType.NUMBER)
1036
1037    def _scan_bits(self) -> None:
1038        self._advance()
1039        value = self._extract_value()
1040        try:
1041            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
1042            int(value, 2)
1043            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1044        except ValueError:
1045            self._add(TokenType.IDENTIFIER)
1046
1047    def _scan_hex(self) -> None:
1048        self._advance()
1049        value = self._extract_value()
1050        try:
1051            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
1052            int(value, 16)
1053            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1054        except ValueError:
1055            self._add(TokenType.IDENTIFIER)
1056
1057    def _extract_value(self) -> str:
1058        while True:
1059            char = self._peek.strip()
1060            if char and char not in self.SINGLE_TOKENS:
1061                self._advance(alnum=True)
1062            else:
1063                break
1064
1065        return self._text
1066
1067    def _scan_string(self, start: str) -> bool:
1068        base = None
1069        token_type = TokenType.STRING
1070
1071        if start in self._QUOTES:
1072            end = self._QUOTES[start]
1073        elif start in self._FORMAT_STRINGS:
1074            end, token_type = self._FORMAT_STRINGS[start]
1075
1076            if token_type == TokenType.HEX_STRING:
1077                base = 16
1078            elif token_type == TokenType.BIT_STRING:
1079                base = 2
1080        else:
1081            return False
1082
1083        self._advance(len(start))
1084        text = self._extract_string(end)
1085
1086        if base:
1087            try:
1088                int(text, base)
1089            except:
1090                raise RuntimeError(
1091                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1092                )
1093        else:
1094            text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1095
1096        self._add(token_type, text)
1097        return True
1098
1099    def _scan_identifier(self, identifier_end: str) -> None:
1100        self._advance()
1101        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1102        self._add(TokenType.IDENTIFIER, text)
1103
1104    def _scan_var(self) -> None:
1105        while True:
1106            char = self._peek.strip()
1107            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1108                self._advance(alnum=True)
1109            else:
1110                break
1111
1112        self._add(
1113            TokenType.VAR
1114            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1115            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1116        )
1117
1118    def _extract_string(self, delimiter: str, escapes=None) -> str:
1119        text = ""
1120        delim_size = len(delimiter)
1121        escapes = self._STRING_ESCAPES if escapes is None else escapes
1122
1123        while True:
1124            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
1125                if self._peek == delimiter:
1126                    text += self._peek
1127                else:
1128                    text += self._char + self._peek
1129
1130                if self._current + 1 < self.size:
1131                    self._advance(2)
1132                else:
1133                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1134            else:
1135                if self._chars(delim_size) == delimiter:
1136                    if delim_size > 1:
1137                        self._advance(delim_size - 1)
1138                    break
1139
1140                if self._end:
1141                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1142
1143                current = self._current - 1
1144                self._advance(alnum=True)
1145                text += self.sql[current : self._current - 1]
1146
1147        return text
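
Dialect tokenizers customize behavior by overriding the class-level tables above; the _Tokenizer metaclass then rebuilds the autofilled lookup structures (such as _IDENTIFIERS and the keyword trie) for the subclass. A hypothetical sketch (MyTokenizer and its overrides are illustrative, not part of sqlglot):

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    # Accept backtick-quoted identifiers in addition to double quotes.
    IDENTIFIERS = ['"', "`"]

    # Extend the keyword table, e.g. treating MINUS as a set-difference
    # keyword in the style of Oracle-like dialects.
    KEYWORDS = {
        **Tokenizer.KEYWORDS,
        "MINUS": TokenType.EXCEPT,
    }

tokens = MyTokenizer().tokenize("SELECT `a` FROM t MINUS SELECT `b` FROM u")
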
def reset(self) -> None:
764    def reset(self) -> None:
765        self.sql = ""
766        self.size = 0
767        self.tokens: t.List[Token] = []
768        self._start = 0
769        self._current = 0
770        self._line = 1
771        self._col = 0
772        self._comments: t.List[str] = []
773
774        self._char = ""
775        self._end = False
776        self._peek = ""
777        self._prev_token_line = -1
def tokenize(self, sql: str) -> List[sqlglot.tokens.Token]:
779    def tokenize(self, sql: str) -> t.List[Token]:
780        """Returns a list of tokens corresponding to the SQL string `sql`."""
781        self.reset()
782        self.sql = sql
783        self.size = len(sql)
784
785        try:
786            self._scan()
787        except Exception as e:
788            start = max(self._current - 50, 0)
789            end = min(self._current + 50, self.size - 1)
790            context = self.sql[start:end]
791            raise ValueError(f"Error tokenizing '{context}'") from e
792
793        return self.tokens

Returns a list of tokens corresponding to the SQL string `sql`.

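A minimal end-to-end sketch; the trailing comment attaches to the token that precedes it on the same line:

from sqlglot.tokens import Tokenizer

for token in Tokenizer().tokenize("SELECT 1 /* one */"):
    print(token.token_type, repr(token.text), token.comments)
# TokenType.SELECT 'SELECT' []
# TokenType.NUMBER '1' [' one ']
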
def peek(self, i: int = 0) -> str:
861    def peek(self, i: int = 0) -> str:
862        i = self._current + i
863        if i < self.size:
864            return self.sql[i]
865        return ""
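
peek looks ahead relative to the scanner's current position without consuming input. A contrived sketch that pokes at the internals directly (normally tokenize() drives these fields):

from sqlglot.tokens import Tokenizer

t = Tokenizer()          # reset() leaves _current at 0
t.sql = "SELECT"
t.size = len(t.sql)
assert t.peek(0) == "S"
assert t.peek(5) == "T"
assert t.peek(6) == ""   # out of range returns the empty string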