
sqlglot.tokens
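
The tokenizer for sqlglot: it converts a raw SQL string into the flat list of Token objects that the parser consumes. A minimal usage sketch (the printed stream is shown approximately):

    from sqlglot.tokens import Tokenizer

    for token in Tokenizer().tokenize("SELECT a FROM t WHERE x = 'it''s'"):
        print(token.token_type, repr(token.text))
    # TokenType.SELECT 'SELECT'
    # TokenType.VAR 'a'
    # TokenType.FROM 'FROM'
    # TokenType.VAR 't'
    # TokenType.WHERE 'WHERE'
    # TokenType.VAR 'x'
    # TokenType.EQ '='
    # TokenType.STRING "it's"

Note that the doubled quote in 'it''s' collapses to a single quote in the token text, because "'" is the default string escape.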

from __future__ import annotations

import typing as t
from enum import auto

from sqlglot.helper import AutoName
from sqlglot.trie import in_trie, new_trie


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    NATIONAL = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()

    # types
    BOOLEAN = auto()
    TINYINT = auto()
    SMALLINT = auto()
    INT = auto()
    BIGINT = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    DATETIME = auto()
    DATE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    UNIQUEIDENTIFIER = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AT_TIME_ZONE = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    BOTH = auto()
    BUCKET = auto()
    BY_DEFAULT = auto()
    CACHE = auto()
    CASCADE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    COMPOUND = auto()
    CONSTRAINT = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_ROW = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DISTINCT = auto()
    DISTINCT_FROM = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOLLOWING = auto()
    FOR = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IF = auto()
    IGNORE_NULLS = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LAZY = auto()
    LEADING = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LOAD_DATA = auto()
    LOCAL = auto()
    MAP = auto()
    MATCH_RECOGNIZE = auto()
    MATERIALIZED = auto()
    MERGE = auto()
    MOD = auto()
    NATURAL = auto()
    NEXT = auto()
    NO_ACTION = auto()
    NOTNULL = auto()
    NULL = auto()
    NULLS_FIRST = auto()
    NULLS_LAST = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPTIONS = auto()
    ORDER_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OUT_OF = auto()
    OVER = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    PRECEDING = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REPLACE = auto()
    RESPECT_NULLS = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SEED = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORTKEY = auto()
    SORT_BY = auto()
    STRUCT = auto()
    TABLE_SAMPLE = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRAILING = auto()
    TRUE = auto()
    UNBOUNDED = auto()
    UNCACHE = auto()
    UNION = auto()
    UNLOGGED = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    WITH_TIME_ZONE = auto()
    WITH_LOCAL_TIME_ZONE = auto()
    WITHIN_GROUP = auto()
    WITHOUT_TIME_ZONE = auto()
    UNIQUE = auto()
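
# Note: TokenType extends sqlglot.helper.AutoName, an Enum whose members take
# their own name as their value, e.g. TokenType.SELECT.value == "SELECT". That
# keeps reprs readable and the values stable if members are reordered.

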
class Token:
    __slots__ = ("token_type", "text", "line", "col", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = max(col - len(text), 1)
        # Default to None rather than [] so tokens never share one mutable list.
        self.comments = comments if comments is not None else []

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"
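
# Quick illustration of the factory classmethods above:
#
#     Token.number(42)
#     # <Token token_type: TokenType.NUMBER, text: 42, line: 1, col: 1, comments: []>
#
# Note that `col` records the 1-based column where the token text starts: the
# constructor is handed the column just past the token and subtracts len(text).

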
class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):  # type: ignore
        klass = super().__new__(cls, clsname, bases, attrs)

        klass._QUOTES = {
            f"{prefix}{s}": e
            for s, e in cls._delimiter_list_to_dict(klass.QUOTES).items()
            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
        }
        klass._BIT_STRINGS = cls._delimiter_list_to_dict(klass.BIT_STRINGS)
        klass._HEX_STRINGS = cls._delimiter_list_to_dict(klass.HEX_STRINGS)
        klass._BYTE_STRINGS = cls._delimiter_list_to_dict(klass.BYTE_STRINGS)
        klass._IDENTIFIERS = cls._delimiter_list_to_dict(klass.IDENTIFIERS)
        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = dict(
            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
            for comment in klass.COMMENTS
        )

        klass.KEYWORD_TRIE = new_trie(
            key.upper()
            for key in {
                **klass.KEYWORDS,
                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
            }
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        return klass

    @staticmethod
    def _delimiter_list_to_dict(items: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in items)
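
# The metaclass runs once per Tokenizer subclass. It normalizes the declarative
# QUOTES / IDENTIFIERS / *_STRINGS lists into start -> end delimiter dicts
# (e.g. _delimiter_list_to_dict(["'", ("$$", "$$")]) == {"'": "'", "$$": "$$"})
# and prebuilds KEYWORD_TRIE so multi-character keywords can be matched.

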
class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []

    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []

    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []

    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']

    STRING_ESCAPES = ["'"]

    _STRING_ESCAPES: t.Set[str] = set()

    IDENTIFIER_ESCAPES = ['"']

    _IDENTIFIER_ESCAPES: t.Set[str] = set()

    KEYWORDS = {
        **{
            f"{key}{postfix}": TokenType.BLOCK_START
            for key in ("{{", "{%", "{#")
            for postfix in ("", "+", "-")
        },
        **{
            f"{prefix}{key}": TokenType.BLOCK_END
            for key in ("%}", "#}")
            for prefix in ("", "+", "-")
        },
        "+}}": TokenType.BLOCK_END,
        "-}}": TokenType.BLOCK_END,
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "BOTH": TokenType.BOTH,
        "BUCKET": TokenType.BUCKET,
        "BY DEFAULT": TokenType.BY_DEFAULT,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CASCADE": TokenType.CASCADE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "COMPOUND": TokenType.COMPOUND,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT ROW": TokenType.CURRENT_ROW,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTINCT FROM": TokenType.DISTINCT_FROM,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOLLOWING": TokenType.FOLLOWING,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "IF": TokenType.IF,
        "ILIKE": TokenType.ILIKE,
        "IGNORE NULLS": TokenType.IGNORE_NULLS,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "LATERAL": TokenType.LATERAL,
        "LAZY": TokenType.LAZY,
        "LEADING": TokenType.LEADING,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD DATA": TokenType.LOAD_DATA,
        "LOCAL": TokenType.LOCAL,
        "MATERIALIZED": TokenType.MATERIALIZED,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NO ACTION": TokenType.NO_ACTION,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "NULLS FIRST": TokenType.NULLS_FIRST,
        "NULLS LAST": TokenType.NULLS_LAST,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "ONLY": TokenType.ONLY,
        "OPTIONS": TokenType.OPTIONS,
        "OR": TokenType.OR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OUT OF": TokenType.OUT_OF,
        "OVER": TokenType.OVER,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRECEDING": TokenType.PRECEDING,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RESPECT NULLS": TokenType.RESPECT_NULLS,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SEED": TokenType.SEED,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORTKEY": TokenType.SORTKEY,
        "SORT BY": TokenType.SORT_BY,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRAILING": TokenType.TRAILING,
        "UNBOUNDED": TokenType.UNBOUNDED,
        "UNION": TokenType.UNION,
        "UNLOGGED": TokenType.UNLOGGED,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
        "WITHIN GROUP": TokenType.WITHIN_GROUP,
        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.BIGINT,
        "DECIMAL": TokenType.DECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ALTER AGGREGATE": TokenType.COMMAND,
        "ALTER DEFAULT": TokenType.COMMAND,
        "ALTER DOMAIN": TokenType.COMMAND,
        "ALTER ROLE": TokenType.COMMAND,
        "ALTER RULE": TokenType.COMMAND,
        "ALTER SEQUENCE": TokenType.COMMAND,
        "ALTER TYPE": TokenType.COMMAND,
        "ALTER USER": TokenType.COMMAND,
        "ALTER VIEW": TokenType.COMMAND,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
    }
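
    # Plain single-word keywords are resolved by a dict lookup in _scan_var;
    # only keys containing a space or a SINGLE_TOKENS character (e.g.
    # "ORDER BY", "->>", "/*+") go through the KEYWORD_TRIE built by the
    # metaclass.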

    WHITE_SPACE = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
        "\r\n": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SET,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}
    ENCODE: t.Optional[str] = None

    COMMENTS = ["--", ("/*", "*/")]
    KEYWORD_TRIE = None  # autofilled

    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_prev_token_comments",
        "_prev_token_type",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = None
        self._end = None
        self._peek = None
        self._prev_token_line = -1
        self._prev_token_comments: t.List[str] = []
        self._prev_token_type = None

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        self._scan()
        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if not self._char:
                break

            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore

            if white_space:
                if white_space == TokenType.BREAK:
                    self._col = 1
                    self._line += 1
            elif self._char.isdigit():  # type:ignore
                self._scan_number()
            elif identifier_end:
                self._scan_identifier(identifier_end)
            else:
                self._scan_keywords()

            if until and until():
                break

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char  # type: ignore
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _advance(self, i: int = 1) -> None:
        self._col += i
        self._current += i
        self._end = self._current >= self.size  # type: ignore
        self._char = self.sql[self._current - 1]  # type: ignore
        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self._prev_token_comments = self._comments
        self._prev_token_type = token_type  # type: ignore
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if token_type in self.COMMANDS and (
            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

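    # _scan_keywords walks KEYWORD_TRIE one character at a time: in_trie
    # returns 0 (dead end, stop), 1 (valid prefix, keep reading) or 2 (a
    # complete keyword; remember it, then keep looking for a longer match).
    # Runs of whitespace inside a candidate collapse to one space, so
    # "ORDER   BY" still matches "ORDER BY".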
    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars: t.Optional[str] = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())  # type: ignore

            if result == 0:
                break
            if result == 2:
                word = chars
            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                chars = None

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        self._add(self.KEYWORDS[word.upper()])

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:  # type: ignore
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]  # type: ignore

        if comment_end:
            comment_end_size = len(comment_end)

            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance()

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
                self._advance()
            self._comments.append(self._text[comment_start_size:])  # type: ignore

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        return True

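    # "0b"/"0x" prefixes defer to _scan_bits/_scan_hex below; an identifier
    # glued to the digits is treated as a dialect literal suffix. For example,
    # with NUMERIC_LITERALS = {"L": "BIGINT"} (Hive-style), "3L" is emitted as
    # NUMBER "3", DCOLON "::", BIGINT "L", i.e. retokenized as a cast.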
    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()  # type: ignore
            if peek == "B":
                return self._scan_bits()
            elif peek == "X":
                return self._scan_hex()

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():  # type: ignore
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:  # type: ignore
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():  # type: ignore
                number_text = self._text
                literal = []

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
                    literal.append(self._peek.upper())  # type: ignore
                    self._advance()

                literal = "".join(literal)  # type: ignore
                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)  # type: ignore
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)  # type: ignore
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:  # type: ignore
            delimiters = self._HEX_STRINGS  # type: ignore
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:  # type: ignore
            delimiters = self._BIT_STRINGS  # type: ignore
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:  # type: ignore
            delimiters = self._BYTE_STRINGS  # type: ignore
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters.get(string_start)
        text = self._extract_string(string_end)

        if base is None:
            self._add(token_type, text)
        else:
            try:
                self._add(token_type, f"{int(text, base)}")
            except ValueError:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        return True

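    # A doubled closing delimiter escapes itself when it is also listed in
    # IDENTIFIER_ESCAPES: with the defaults, "a""b" is read as one identifier
    # with text a"b.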
    def _scan_identifier(self, identifier_end: str) -> None:
        text = ""
        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES

        while True:
            if self._end:
                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")

            self._advance()
            if self._char == identifier_end:
                if identifier_end_is_escape and self._peek == identifier_end:
                    text += identifier_end  # type: ignore
                    self._advance()
                    continue

                break

            text += self._char  # type: ignore

        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break
        self._add(
            TokenType.VAR
            if self._prev_token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str) -> str:
        text = ""
        delim_size = len(delimiter)

        while True:
            if self._char in self._STRING_ESCAPES and (
                self._peek == delimiter or self._peek in self._STRING_ESCAPES
            ):
                if self._peek == delimiter:
                    text += self._peek  # type: ignore
                else:
                    text += self._char + self._peek  # type: ignore

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
                text += self._char  # type: ignore
                self._advance()

        return text
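
Because the metaclass recomputes the delimiter tables and the keyword trie for every subclass, dialects customize tokenization declaratively by overriding the class-level lists and dicts. A minimal sketch; the dialect below is hypothetical, not one that ships with sqlglot:

    from sqlglot.tokens import Tokenizer, TokenType

    class MyTokenizer(Tokenizer):
        # back-tick identifiers in addition to the default double quotes
        IDENTIFIERS = ['"', "`"]
        # an extra keyword mapped onto an existing token type
        KEYWORDS = {**Tokenizer.KEYWORDS, "MATERIALIZE": TokenType.COMMAND}

    tokens = MyTokenizer().tokenize("SELECT `order` FROM t")
    # `order` becomes a single IDENTIFIER token with text "order"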
class TokenType(sqlglot.helper.AutoName):
 11class TokenType(AutoName):
 12    L_PAREN = auto()
 13    R_PAREN = auto()
 14    L_BRACKET = auto()
 15    R_BRACKET = auto()
 16    L_BRACE = auto()
 17    R_BRACE = auto()
 18    COMMA = auto()
 19    DOT = auto()
 20    DASH = auto()
 21    PLUS = auto()
 22    COLON = auto()
 23    DCOLON = auto()
 24    SEMICOLON = auto()
 25    STAR = auto()
 26    BACKSLASH = auto()
 27    SLASH = auto()
 28    LT = auto()
 29    LTE = auto()
 30    GT = auto()
 31    GTE = auto()
 32    NOT = auto()
 33    EQ = auto()
 34    NEQ = auto()
 35    NULLSAFE_EQ = auto()
 36    AND = auto()
 37    OR = auto()
 38    AMP = auto()
 39    DPIPE = auto()
 40    PIPE = auto()
 41    CARET = auto()
 42    TILDA = auto()
 43    ARROW = auto()
 44    DARROW = auto()
 45    FARROW = auto()
 46    HASH = auto()
 47    HASH_ARROW = auto()
 48    DHASH_ARROW = auto()
 49    LR_ARROW = auto()
 50    DOLLAR = auto()
 51    PARAMETER = auto()
 52    SESSION_PARAMETER = auto()
 53    NATIONAL = auto()
 54
 55    BLOCK_START = auto()
 56    BLOCK_END = auto()
 57
 58    SPACE = auto()
 59    BREAK = auto()
 60
 61    STRING = auto()
 62    NUMBER = auto()
 63    IDENTIFIER = auto()
 64    COLUMN = auto()
 65    COLUMN_DEF = auto()
 66    SCHEMA = auto()
 67    TABLE = auto()
 68    VAR = auto()
 69    BIT_STRING = auto()
 70    HEX_STRING = auto()
 71    BYTE_STRING = auto()
 72
 73    # types
 74    BOOLEAN = auto()
 75    TINYINT = auto()
 76    SMALLINT = auto()
 77    INT = auto()
 78    BIGINT = auto()
 79    FLOAT = auto()
 80    DOUBLE = auto()
 81    DECIMAL = auto()
 82    CHAR = auto()
 83    NCHAR = auto()
 84    VARCHAR = auto()
 85    NVARCHAR = auto()
 86    TEXT = auto()
 87    MEDIUMTEXT = auto()
 88    LONGTEXT = auto()
 89    MEDIUMBLOB = auto()
 90    LONGBLOB = auto()
 91    BINARY = auto()
 92    VARBINARY = auto()
 93    JSON = auto()
 94    JSONB = auto()
 95    TIME = auto()
 96    TIMESTAMP = auto()
 97    TIMESTAMPTZ = auto()
 98    TIMESTAMPLTZ = auto()
 99    DATETIME = auto()
100    DATE = auto()
101    UUID = auto()
102    GEOGRAPHY = auto()
103    NULLABLE = auto()
104    GEOMETRY = auto()
105    HLLSKETCH = auto()
106    HSTORE = auto()
107    SUPER = auto()
108    SERIAL = auto()
109    SMALLSERIAL = auto()
110    BIGSERIAL = auto()
111    XML = auto()
112    UNIQUEIDENTIFIER = auto()
113    MONEY = auto()
114    SMALLMONEY = auto()
115    ROWVERSION = auto()
116    IMAGE = auto()
117    VARIANT = auto()
118    OBJECT = auto()
119
120    # keywords
121    ALIAS = auto()
122    ALTER = auto()
123    ALWAYS = auto()
124    ALL = auto()
125    ANTI = auto()
126    ANY = auto()
127    APPLY = auto()
128    ARRAY = auto()
129    ASC = auto()
130    ASOF = auto()
131    AT_TIME_ZONE = auto()
132    AUTO_INCREMENT = auto()
133    BEGIN = auto()
134    BETWEEN = auto()
135    BOTH = auto()
136    BUCKET = auto()
137    BY_DEFAULT = auto()
138    CACHE = auto()
139    CASCADE = auto()
140    CASE = auto()
141    CHARACTER_SET = auto()
142    CLUSTER_BY = auto()
143    COLLATE = auto()
144    COMMAND = auto()
145    COMMENT = auto()
146    COMMIT = auto()
147    COMPOUND = auto()
148    CONSTRAINT = auto()
149    CREATE = auto()
150    CROSS = auto()
151    CUBE = auto()
152    CURRENT_DATE = auto()
153    CURRENT_DATETIME = auto()
154    CURRENT_ROW = auto()
155    CURRENT_TIME = auto()
156    CURRENT_TIMESTAMP = auto()
157    DEFAULT = auto()
158    DELETE = auto()
159    DESC = auto()
160    DESCRIBE = auto()
161    DISTINCT = auto()
162    DISTINCT_FROM = auto()
163    DISTRIBUTE_BY = auto()
164    DIV = auto()
165    DROP = auto()
166    ELSE = auto()
167    END = auto()
168    ESCAPE = auto()
169    EXCEPT = auto()
170    EXECUTE = auto()
171    EXISTS = auto()
172    FALSE = auto()
173    FETCH = auto()
174    FILTER = auto()
175    FINAL = auto()
176    FIRST = auto()
177    FOLLOWING = auto()
178    FOR = auto()
179    FOREIGN_KEY = auto()
180    FORMAT = auto()
181    FROM = auto()
182    FULL = auto()
183    FUNCTION = auto()
184    GLOB = auto()
185    GLOBAL = auto()
186    GROUP_BY = auto()
187    GROUPING_SETS = auto()
188    HAVING = auto()
189    HINT = auto()
190    IF = auto()
191    IGNORE_NULLS = auto()
192    ILIKE = auto()
193    ILIKE_ANY = auto()
194    IN = auto()
195    INDEX = auto()
196    INNER = auto()
197    INSERT = auto()
198    INTERSECT = auto()
199    INTERVAL = auto()
200    INTO = auto()
201    INTRODUCER = auto()
202    IRLIKE = auto()
203    IS = auto()
204    ISNULL = auto()
205    JOIN = auto()
206    LANGUAGE = auto()
207    LATERAL = auto()
208    LAZY = auto()
209    LEADING = auto()
210    LEFT = auto()
211    LIKE = auto()
212    LIKE_ANY = auto()
213    LIMIT = auto()
214    LOAD_DATA = auto()
215    LOCAL = auto()
216    MAP = auto()
217    MATCH_RECOGNIZE = auto()
218    MATERIALIZED = auto()
219    MERGE = auto()
220    MOD = auto()
221    NATURAL = auto()
222    NEXT = auto()
223    NO_ACTION = auto()
224    NOTNULL = auto()
225    NULL = auto()
226    NULLS_FIRST = auto()
227    NULLS_LAST = auto()
228    OFFSET = auto()
229    ON = auto()
230    ONLY = auto()
231    OPTIONS = auto()
232    ORDER_BY = auto()
233    ORDERED = auto()
234    ORDINALITY = auto()
235    OUTER = auto()
236    OUT_OF = auto()
237    OVER = auto()
238    OVERWRITE = auto()
239    PARTITION = auto()
240    PARTITION_BY = auto()
241    PERCENT = auto()
242    PIVOT = auto()
243    PLACEHOLDER = auto()
244    PRECEDING = auto()
245    PRIMARY_KEY = auto()
246    PROCEDURE = auto()
247    PROPERTIES = auto()
248    PSEUDO_TYPE = auto()
249    QUALIFY = auto()
250    QUOTE = auto()
251    RANGE = auto()
252    RECURSIVE = auto()
253    REPLACE = auto()
254    RESPECT_NULLS = auto()
255    RETURNING = auto()
256    REFERENCES = auto()
257    RIGHT = auto()
258    RLIKE = auto()
259    ROLLBACK = auto()
260    ROLLUP = auto()
261    ROW = auto()
262    ROWS = auto()
263    SEED = auto()
264    SELECT = auto()
265    SEMI = auto()
266    SEPARATOR = auto()
267    SERDE_PROPERTIES = auto()
268    SET = auto()
269    SHOW = auto()
270    SIMILAR_TO = auto()
271    SOME = auto()
272    SORTKEY = auto()
273    SORT_BY = auto()
274    STRUCT = auto()
275    TABLE_SAMPLE = auto()
276    TEMPORARY = auto()
277    TOP = auto()
278    THEN = auto()
279    TRAILING = auto()
280    TRUE = auto()
281    UNBOUNDED = auto()
282    UNCACHE = auto()
283    UNION = auto()
284    UNLOGGED = auto()
285    UNNEST = auto()
286    UNPIVOT = auto()
287    UPDATE = auto()
288    USE = auto()
289    USING = auto()
290    VALUES = auto()
291    VIEW = auto()
292    VOLATILE = auto()
293    WHEN = auto()
294    WHERE = auto()
295    WINDOW = auto()
296    WITH = auto()
297    WITH_TIME_ZONE = auto()
298    WITH_LOCAL_TIME_ZONE = auto()
299    WITHIN_GROUP = auto()
300    WITHOUT_TIME_ZONE = auto()
301    UNIQUE = auto()

An enumeration.

L_PAREN = <TokenType.L_PAREN: 'L_PAREN'>
R_PAREN = <TokenType.R_PAREN: 'R_PAREN'>
L_BRACKET = <TokenType.L_BRACKET: 'L_BRACKET'>
R_BRACKET = <TokenType.R_BRACKET: 'R_BRACKET'>
L_BRACE = <TokenType.L_BRACE: 'L_BRACE'>
R_BRACE = <TokenType.R_BRACE: 'R_BRACE'>
COMMA = <TokenType.COMMA: 'COMMA'>
DOT = <TokenType.DOT: 'DOT'>
DASH = <TokenType.DASH: 'DASH'>
PLUS = <TokenType.PLUS: 'PLUS'>
COLON = <TokenType.COLON: 'COLON'>
DCOLON = <TokenType.DCOLON: 'DCOLON'>
SEMICOLON = <TokenType.SEMICOLON: 'SEMICOLON'>
STAR = <TokenType.STAR: 'STAR'>
BACKSLASH = <TokenType.BACKSLASH: 'BACKSLASH'>
SLASH = <TokenType.SLASH: 'SLASH'>
LT = <TokenType.LT: 'LT'>
LTE = <TokenType.LTE: 'LTE'>
GT = <TokenType.GT: 'GT'>
GTE = <TokenType.GTE: 'GTE'>
NOT = <TokenType.NOT: 'NOT'>
EQ = <TokenType.EQ: 'EQ'>
NEQ = <TokenType.NEQ: 'NEQ'>
NULLSAFE_EQ = <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>
AND = <TokenType.AND: 'AND'>
OR = <TokenType.OR: 'OR'>
AMP = <TokenType.AMP: 'AMP'>
DPIPE = <TokenType.DPIPE: 'DPIPE'>
PIPE = <TokenType.PIPE: 'PIPE'>
CARET = <TokenType.CARET: 'CARET'>
TILDA = <TokenType.TILDA: 'TILDA'>
ARROW = <TokenType.ARROW: 'ARROW'>
DARROW = <TokenType.DARROW: 'DARROW'>
FARROW = <TokenType.FARROW: 'FARROW'>
HASH = <TokenType.HASH: 'HASH'>
HASH_ARROW = <TokenType.HASH_ARROW: 'HASH_ARROW'>
DHASH_ARROW = <TokenType.DHASH_ARROW: 'DHASH_ARROW'>
LR_ARROW = <TokenType.LR_ARROW: 'LR_ARROW'>
DOLLAR = <TokenType.DOLLAR: 'DOLLAR'>
PARAMETER = <TokenType.PARAMETER: 'PARAMETER'>
SESSION_PARAMETER = <TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>
NATIONAL = <TokenType.NATIONAL: 'NATIONAL'>
BLOCK_START = <TokenType.BLOCK_START: 'BLOCK_START'>
BLOCK_END = <TokenType.BLOCK_END: 'BLOCK_END'>
SPACE = <TokenType.SPACE: 'SPACE'>
BREAK = <TokenType.BREAK: 'BREAK'>
STRING = <TokenType.STRING: 'STRING'>
NUMBER = <TokenType.NUMBER: 'NUMBER'>
IDENTIFIER = <TokenType.IDENTIFIER: 'IDENTIFIER'>
COLUMN = <TokenType.COLUMN: 'COLUMN'>
COLUMN_DEF = <TokenType.COLUMN_DEF: 'COLUMN_DEF'>
SCHEMA = <TokenType.SCHEMA: 'SCHEMA'>
TABLE = <TokenType.TABLE: 'TABLE'>
VAR = <TokenType.VAR: 'VAR'>
BIT_STRING = <TokenType.BIT_STRING: 'BIT_STRING'>
HEX_STRING = <TokenType.HEX_STRING: 'HEX_STRING'>
BYTE_STRING = <TokenType.BYTE_STRING: 'BYTE_STRING'>
BOOLEAN = <TokenType.BOOLEAN: 'BOOLEAN'>
TINYINT = <TokenType.TINYINT: 'TINYINT'>
SMALLINT = <TokenType.SMALLINT: 'SMALLINT'>
INT = <TokenType.INT: 'INT'>
BIGINT = <TokenType.BIGINT: 'BIGINT'>
FLOAT = <TokenType.FLOAT: 'FLOAT'>
DOUBLE = <TokenType.DOUBLE: 'DOUBLE'>
DECIMAL = <TokenType.DECIMAL: 'DECIMAL'>
CHAR = <TokenType.CHAR: 'CHAR'>
NCHAR = <TokenType.NCHAR: 'NCHAR'>
VARCHAR = <TokenType.VARCHAR: 'VARCHAR'>
NVARCHAR = <TokenType.NVARCHAR: 'NVARCHAR'>
TEXT = <TokenType.TEXT: 'TEXT'>
MEDIUMTEXT = <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>
LONGTEXT = <TokenType.LONGTEXT: 'LONGTEXT'>
MEDIUMBLOB = <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>
LONGBLOB = <TokenType.LONGBLOB: 'LONGBLOB'>
BINARY = <TokenType.BINARY: 'BINARY'>
VARBINARY = <TokenType.VARBINARY: 'VARBINARY'>
JSON = <TokenType.JSON: 'JSON'>
JSONB = <TokenType.JSONB: 'JSONB'>
TIME = <TokenType.TIME: 'TIME'>
TIMESTAMP = <TokenType.TIMESTAMP: 'TIMESTAMP'>
TIMESTAMPTZ = <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>
TIMESTAMPLTZ = <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>
DATETIME = <TokenType.DATETIME: 'DATETIME'>
DATE = <TokenType.DATE: 'DATE'>
UUID = <TokenType.UUID: 'UUID'>
GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>
NULLABLE = <TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'>
HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE = <TokenType.HSTORE: 'HSTORE'>
SUPER = <TokenType.SUPER: 'SUPER'>
SERIAL = <TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'>
XML = <TokenType.XML: 'XML'>
UNIQUEIDENTIFIER = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>
MONEY = <TokenType.MONEY: 'MONEY'>
SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE = <TokenType.IMAGE: 'IMAGE'>
VARIANT = <TokenType.VARIANT: 'VARIANT'>
OBJECT = <TokenType.OBJECT: 'OBJECT'>
ALIAS = <TokenType.ALIAS: 'ALIAS'>
ALTER = <TokenType.ALTER: 'ALTER'>
ALWAYS = <TokenType.ALWAYS: 'ALWAYS'>
ALL = <TokenType.ALL: 'ALL'>
ANTI = <TokenType.ANTI: 'ANTI'>
ANY = <TokenType.ANY: 'ANY'>
APPLY = <TokenType.APPLY: 'APPLY'>
ARRAY = <TokenType.ARRAY: 'ARRAY'>
ASC = <TokenType.ASC: 'ASC'>
ASOF = <TokenType.ASOF: 'ASOF'>
AT_TIME_ZONE = <TokenType.AT_TIME_ZONE: 'AT_TIME_ZONE'>
AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN = <TokenType.BEGIN: 'BEGIN'>
BETWEEN = <TokenType.BETWEEN: 'BETWEEN'>
BOTH = <TokenType.BOTH: 'BOTH'>
BUCKET = <TokenType.BUCKET: 'BUCKET'>
BY_DEFAULT = <TokenType.BY_DEFAULT: 'BY_DEFAULT'>
CACHE = <TokenType.CACHE: 'CACHE'>
CASCADE = <TokenType.CASCADE: 'CASCADE'>
CASE = <TokenType.CASE: 'CASE'>
CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY = <TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE = <TokenType.COLLATE: 'COLLATE'>
COMMAND = <TokenType.COMMAND: 'COMMAND'>
COMMENT = <TokenType.COMMENT: 'COMMENT'>
COMMIT = <TokenType.COMMIT: 'COMMIT'>
COMPOUND = <TokenType.COMPOUND: 'COMPOUND'>
CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'>
CREATE = <TokenType.CREATE: 'CREATE'>
CROSS = <TokenType.CROSS: 'CROSS'>
CUBE = <TokenType.CUBE: 'CUBE'>
CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_ROW = <TokenType.CURRENT_ROW: 'CURRENT_ROW'>
CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
DEFAULT = <TokenType.DEFAULT: 'DEFAULT'>
DELETE = <TokenType.DELETE: 'DELETE'>
DESC = <TokenType.DESC: 'DESC'>
DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'>
DISTINCT = <TokenType.DISTINCT: 'DISTINCT'>
DISTINCT_FROM = <TokenType.DISTINCT_FROM: 'DISTINCT_FROM'>
DISTRIBUTE_BY = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV = <TokenType.DIV: 'DIV'>
DROP = <TokenType.DROP: 'DROP'>
ELSE = <TokenType.ELSE: 'ELSE'>
END = <TokenType.END: 'END'>
ESCAPE = <TokenType.ESCAPE: 'ESCAPE'>
EXCEPT = <TokenType.EXCEPT: 'EXCEPT'>
EXECUTE = <TokenType.EXECUTE: 'EXECUTE'>
EXISTS = <TokenType.EXISTS: 'EXISTS'>
FALSE = <TokenType.FALSE: 'FALSE'>
FETCH = <TokenType.FETCH: 'FETCH'>
FILTER = <TokenType.FILTER: 'FILTER'>
FINAL = <TokenType.FINAL: 'FINAL'>
FIRST = <TokenType.FIRST: 'FIRST'>
FOLLOWING = <TokenType.FOLLOWING: 'FOLLOWING'>
FOR = <TokenType.FOR: 'FOR'>
FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT = <TokenType.FORMAT: 'FORMAT'>
FROM = <TokenType.FROM: 'FROM'>
FULL = <TokenType.FULL: 'FULL'>
FUNCTION = <TokenType.FUNCTION: 'FUNCTION'>
GLOB = <TokenType.GLOB: 'GLOB'>
GLOBAL = <TokenType.GLOBAL: 'GLOBAL'>
GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING = <TokenType.HAVING: 'HAVING'>
HINT = <TokenType.HINT: 'HINT'>
IF = <TokenType.IF: 'IF'>
IGNORE_NULLS = <TokenType.IGNORE_NULLS: 'IGNORE_NULLS'>
ILIKE = <TokenType.ILIKE: 'ILIKE'>
ILIKE_ANY = <TokenType.ILIKE_ANY: 'ILIKE_ANY'>
IN = <TokenType.IN: 'IN'>
INDEX = <TokenType.INDEX: 'INDEX'>
INNER = <TokenType.INNER: 'INNER'>
INSERT = <TokenType.INSERT: 'INSERT'>
INTERSECT = <TokenType.INTERSECT: 'INTERSECT'>
INTERVAL = <TokenType.INTERVAL: 'INTERVAL'>
INTO = <TokenType.INTO: 'INTO'>
INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE = <TokenType.IRLIKE: 'IRLIKE'>
IS = <TokenType.IS: 'IS'>
ISNULL = <TokenType.ISNULL: 'ISNULL'>
JOIN = <TokenType.JOIN: 'JOIN'>
LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL = <TokenType.LATERAL: 'LATERAL'>
LAZY = <TokenType.LAZY: 'LAZY'>
LEADING = <TokenType.LEADING: 'LEADING'>
LEFT = <TokenType.LEFT: 'LEFT'>
LIKE = <TokenType.LIKE: 'LIKE'>
LIKE_ANY = <TokenType.LIKE_ANY: 'LIKE_ANY'>
LIMIT = <TokenType.LIMIT: 'LIMIT'>
LOAD_DATA = <TokenType.LOAD_DATA: 'LOAD_DATA'>
LOCAL = <TokenType.LOCAL: 'LOCAL'>
MAP = <TokenType.MAP: 'MAP'>
MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MATERIALIZED = <TokenType.MATERIALIZED: 'MATERIALIZED'>
MERGE = <TokenType.MERGE: 'MERGE'>
MOD = <TokenType.MOD: 'MOD'>
NATURAL = <TokenType.NATURAL: 'NATURAL'>
NEXT = <TokenType.NEXT: 'NEXT'>
NO_ACTION = <TokenType.NO_ACTION: 'NO_ACTION'>
NOTNULL = <TokenType.NOTNULL: 'NOTNULL'>
NULL = <TokenType.NULL: 'NULL'>
NULLS_FIRST = <TokenType.NULLS_FIRST: 'NULLS_FIRST'>
NULLS_LAST = <TokenType.NULLS_LAST: 'NULLS_LAST'>
OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
ONLY = <TokenType.ONLY: 'ONLY'>
OPTIONS = <TokenType.OPTIONS: 'OPTIONS'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUTER = <TokenType.OUTER: 'OUTER'>
OUT_OF = <TokenType.OUT_OF: 'OUT_OF'>
OVER = <TokenType.OVER: 'OVER'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRECEDING = <TokenType.PRECEDING: 'PRECEDING'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RESPECT_NULLS = <TokenType.RESPECT_NULLS: 'RESPECT_NULLS'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SEED = <TokenType.SEED: 'SEED'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
SORTKEY = <TokenType.SORTKEY: 'SORTKEY'>
SORT_BY = <TokenType.SORT_BY: 'SORT_BY'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRAILING = <TokenType.TRAILING: 'TRAILING'>
TRUE = <TokenType.TRUE: 'TRUE'>
UNBOUNDED = <TokenType.UNBOUNDED: 'UNBOUNDED'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNLOGGED = <TokenType.UNLOGGED: 'UNLOGGED'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VIEW = <TokenType.VIEW: 'VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
WITH_TIME_ZONE = <TokenType.WITH_TIME_ZONE: 'WITH_TIME_ZONE'>
WITH_LOCAL_TIME_ZONE = <TokenType.WITH_LOCAL_TIME_ZONE: 'WITH_LOCAL_TIME_ZONE'>
WITHIN_GROUP = <TokenType.WITHIN_GROUP: 'WITHIN_GROUP'>
WITHOUT_TIME_ZONE = <TokenType.WITHOUT_TIME_ZONE: 'WITHOUT_TIME_ZONE'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
Inherited Members
enum.Enum
name
value
class Token:
304class Token:
305    __slots__ = ("token_type", "text", "line", "col", "comments")
306
307    @classmethod
308    def number(cls, number: int) -> Token:
309        """Returns a NUMBER token with `number` as its text."""
310        return cls(TokenType.NUMBER, str(number))
311
312    @classmethod
313    def string(cls, string: str) -> Token:
314        """Returns a STRING token with `string` as its text."""
315        return cls(TokenType.STRING, string)
316
317    @classmethod
318    def identifier(cls, identifier: str) -> Token:
319        """Returns an IDENTIFIER token with `identifier` as its text."""
320        return cls(TokenType.IDENTIFIER, identifier)
321
322    @classmethod
323    def var(cls, var: str) -> Token:
324        """Returns a VAR token with `var` as its text."""
325        return cls(TokenType.VAR, var)
326
327    def __init__(
328        self,
329        token_type: TokenType,
330        text: str,
331        line: int = 1,
332        col: int = 1,
333        comments: t.List[str] = [],
334    ) -> None:
335        self.token_type = token_type
336        self.text = text
337        self.line = line
338        self.col = max(col - len(text), 1)
339        self.comments = comments
340
341    def __repr__(self) -> str:
342        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
343        return f"<Token {attributes}>"
Token(token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, comments: List[str] = [])
327    def __init__(
328        self,
329        token_type: TokenType,
330        text: str,
331        line: int = 1,
332        col: int = 1,
333        comments: t.List[str] = [],
334    ) -> None:
335        self.token_type = token_type
336        self.text = text
337        self.line = line
338        self.col = max(col - len(text), 1)
339        self.comments = comments
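Note that the `col` argument is the column just past the token's text; the constructor stores the column of the token's first character, floored at 1. Also note that `comments` defaults to a shared list, so pass an explicit list when you plan to mutate `token.comments` in place. A minimal sketch with illustrative values:

    from sqlglot.tokens import Token, TokenType

    # col=8 is one past "foo", which ends at column 7; the stored
    # col is the start column: max(8 - len("foo"), 1) == 5.
    token = Token(TokenType.VAR, "foo", line=1, col=8)
    assert token.col == 5
    print(token)  # <Token token_type: TokenType.VAR, text: foo, line: 1, col: 5, comments: []>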
@classmethod
def number(cls, number: int) -> sqlglot.tokens.Token:
307    @classmethod
308    def number(cls, number: int) -> Token:
309        """Returns a NUMBER token with `number` as its text."""
310        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> sqlglot.tokens.Token:
312    @classmethod
313    def string(cls, string: str) -> Token:
314        """Returns a STRING token with `string` as its text."""
315        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> sqlglot.tokens.Token:
317    @classmethod
318    def identifier(cls, identifier: str) -> Token:
319        """Returns an IDENTIFIER token with `identifier` as its text."""
320        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> sqlglot.tokens.Token:
322    @classmethod
323    def var(cls, var: str) -> Token:
324        """Returns a VAR token with `var` as its text."""
325        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.
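Taken together, these classmethods are thin shorthands over the constructor. A minimal usage sketch:

    from sqlglot.tokens import Token, TokenType

    assert Token.number(42).token_type is TokenType.NUMBER
    assert Token.number(42).text == "42"  # numbers are stored as text
    assert Token.string("hi").token_type is TokenType.STRING
    assert Token.identifier("x").token_type is TokenType.IDENTIFIER
    assert Token.var("y").token_type is TokenType.VAR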

class Tokenizer:
 386class Tokenizer(metaclass=_Tokenizer):
 387    SINGLE_TOKENS = {
 388        "(": TokenType.L_PAREN,
 389        ")": TokenType.R_PAREN,
 390        "[": TokenType.L_BRACKET,
 391        "]": TokenType.R_BRACKET,
 392        "{": TokenType.L_BRACE,
 393        "}": TokenType.R_BRACE,
 394        "&": TokenType.AMP,
 395        "^": TokenType.CARET,
 396        ":": TokenType.COLON,
 397        ",": TokenType.COMMA,
 398        ".": TokenType.DOT,
 399        "-": TokenType.DASH,
 400        "=": TokenType.EQ,
 401        ">": TokenType.GT,
 402        "<": TokenType.LT,
 403        "%": TokenType.MOD,
 404        "!": TokenType.NOT,
 405        "|": TokenType.PIPE,
 406        "+": TokenType.PLUS,
 407        ";": TokenType.SEMICOLON,
 408        "/": TokenType.SLASH,
 409        "\\": TokenType.BACKSLASH,
 410        "*": TokenType.STAR,
 411        "~": TokenType.TILDA,
 412        "?": TokenType.PLACEHOLDER,
 413        "@": TokenType.PARAMETER,
 414        # used for breaking a var like x'y' but nothing else
 415        # the token type doesn't matter
 416        "'": TokenType.QUOTE,
 417        "`": TokenType.IDENTIFIER,
 418        '"': TokenType.IDENTIFIER,
 419        "#": TokenType.HASH,
 420    }
 421
 422    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 423
 424    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 425
 426    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 427
 428    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 429
 430    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 431
 432    STRING_ESCAPES = ["'"]
 433
 434    _STRING_ESCAPES: t.Set[str] = set()
 435
 436    IDENTIFIER_ESCAPES = ['"']
 437
 438    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 439
 440    KEYWORDS = {
 441        **{
 442            f"{key}{postfix}": TokenType.BLOCK_START
 443            for key in ("{{", "{%", "{#")
 444            for postfix in ("", "+", "-")
 445        },
 446        **{
 447            f"{prefix}{key}": TokenType.BLOCK_END
 448            for key in ("%}", "#}")
 449            for prefix in ("", "+", "-")
 450        },
 451        "+}}": TokenType.BLOCK_END,
 452        "-}}": TokenType.BLOCK_END,
 453        "/*+": TokenType.HINT,
 454        "==": TokenType.EQ,
 455        "::": TokenType.DCOLON,
 456        "||": TokenType.DPIPE,
 457        ">=": TokenType.GTE,
 458        "<=": TokenType.LTE,
 459        "<>": TokenType.NEQ,
 460        "!=": TokenType.NEQ,
 461        "<=>": TokenType.NULLSAFE_EQ,
 462        "->": TokenType.ARROW,
 463        "->>": TokenType.DARROW,
 464        "=>": TokenType.FARROW,
 465        "#>": TokenType.HASH_ARROW,
 466        "#>>": TokenType.DHASH_ARROW,
 467        "<->": TokenType.LR_ARROW,
 468        "ALL": TokenType.ALL,
 469        "ALWAYS": TokenType.ALWAYS,
 470        "AND": TokenType.AND,
 471        "ANTI": TokenType.ANTI,
 472        "ANY": TokenType.ANY,
 473        "ASC": TokenType.ASC,
 474        "AS": TokenType.ALIAS,
 475        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
 476        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 477        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 478        "BEGIN": TokenType.BEGIN,
 479        "BETWEEN": TokenType.BETWEEN,
 480        "BOTH": TokenType.BOTH,
 481        "BUCKET": TokenType.BUCKET,
 482        "BY DEFAULT": TokenType.BY_DEFAULT,
 483        "CACHE": TokenType.CACHE,
 484        "UNCACHE": TokenType.UNCACHE,
 485        "CASE": TokenType.CASE,
 486        "CASCADE": TokenType.CASCADE,
 487        "CHARACTER SET": TokenType.CHARACTER_SET,
 488        "CLUSTER BY": TokenType.CLUSTER_BY,
 489        "COLLATE": TokenType.COLLATE,
 490        "COLUMN": TokenType.COLUMN,
 491        "COMMIT": TokenType.COMMIT,
 492        "COMPOUND": TokenType.COMPOUND,
 493        "CONSTRAINT": TokenType.CONSTRAINT,
 494        "CREATE": TokenType.CREATE,
 495        "CROSS": TokenType.CROSS,
 496        "CUBE": TokenType.CUBE,
 497        "CURRENT_DATE": TokenType.CURRENT_DATE,
 498        "CURRENT ROW": TokenType.CURRENT_ROW,
 499        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 500        "DEFAULT": TokenType.DEFAULT,
 501        "DELETE": TokenType.DELETE,
 502        "DESC": TokenType.DESC,
 503        "DESCRIBE": TokenType.DESCRIBE,
 504        "DISTINCT": TokenType.DISTINCT,
 505        "DISTINCT FROM": TokenType.DISTINCT_FROM,
 506        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 507        "DIV": TokenType.DIV,
 508        "DROP": TokenType.DROP,
 509        "ELSE": TokenType.ELSE,
 510        "END": TokenType.END,
 511        "ESCAPE": TokenType.ESCAPE,
 512        "EXCEPT": TokenType.EXCEPT,
 513        "EXECUTE": TokenType.EXECUTE,
 514        "EXISTS": TokenType.EXISTS,
 515        "FALSE": TokenType.FALSE,
 516        "FETCH": TokenType.FETCH,
 517        "FILTER": TokenType.FILTER,
 518        "FIRST": TokenType.FIRST,
 519        "FULL": TokenType.FULL,
 520        "FUNCTION": TokenType.FUNCTION,
 521        "FOLLOWING": TokenType.FOLLOWING,
 522        "FOR": TokenType.FOR,
 523        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 524        "FORMAT": TokenType.FORMAT,
 525        "FROM": TokenType.FROM,
 526        "GLOB": TokenType.GLOB,
 527        "GROUP BY": TokenType.GROUP_BY,
 528        "GROUPING SETS": TokenType.GROUPING_SETS,
 529        "HAVING": TokenType.HAVING,
 530        "IF": TokenType.IF,
 531        "ILIKE": TokenType.ILIKE,
 532        "IGNORE NULLS": TokenType.IGNORE_NULLS,
 533        "IN": TokenType.IN,
 534        "INDEX": TokenType.INDEX,
 535        "INNER": TokenType.INNER,
 536        "INSERT": TokenType.INSERT,
 537        "INTERVAL": TokenType.INTERVAL,
 538        "INTERSECT": TokenType.INTERSECT,
 539        "INTO": TokenType.INTO,
 540        "IS": TokenType.IS,
 541        "ISNULL": TokenType.ISNULL,
 542        "JOIN": TokenType.JOIN,
 543        "LATERAL": TokenType.LATERAL,
 544        "LAZY": TokenType.LAZY,
 545        "LEADING": TokenType.LEADING,
 546        "LEFT": TokenType.LEFT,
 547        "LIKE": TokenType.LIKE,
 548        "LIMIT": TokenType.LIMIT,
 549        "LOAD DATA": TokenType.LOAD_DATA,
 550        "LOCAL": TokenType.LOCAL,
 551        "MATERIALIZED": TokenType.MATERIALIZED,
 552        "MERGE": TokenType.MERGE,
 553        "NATURAL": TokenType.NATURAL,
 554        "NEXT": TokenType.NEXT,
 555        "NO ACTION": TokenType.NO_ACTION,
 556        "NOT": TokenType.NOT,
 557        "NOTNULL": TokenType.NOTNULL,
 558        "NULL": TokenType.NULL,
 559        "NULLS FIRST": TokenType.NULLS_FIRST,
 560        "NULLS LAST": TokenType.NULLS_LAST,
 561        "OBJECT": TokenType.OBJECT,
 562        "OFFSET": TokenType.OFFSET,
 563        "ON": TokenType.ON,
 564        "ONLY": TokenType.ONLY,
 565        "OPTIONS": TokenType.OPTIONS,
 566        "OR": TokenType.OR,
 567        "ORDER BY": TokenType.ORDER_BY,
 568        "ORDINALITY": TokenType.ORDINALITY,
 569        "OUTER": TokenType.OUTER,
 570        "OUT OF": TokenType.OUT_OF,
 571        "OVER": TokenType.OVER,
 572        "OVERWRITE": TokenType.OVERWRITE,
 573        "PARTITION": TokenType.PARTITION,
 574        "PARTITION BY": TokenType.PARTITION_BY,
 575        "PARTITIONED BY": TokenType.PARTITION_BY,
 576        "PARTITIONED_BY": TokenType.PARTITION_BY,
 577        "PERCENT": TokenType.PERCENT,
 578        "PIVOT": TokenType.PIVOT,
 579        "PRECEDING": TokenType.PRECEDING,
 580        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 581        "PROCEDURE": TokenType.PROCEDURE,
 582        "QUALIFY": TokenType.QUALIFY,
 583        "RANGE": TokenType.RANGE,
 584        "RECURSIVE": TokenType.RECURSIVE,
 585        "REGEXP": TokenType.RLIKE,
 586        "REPLACE": TokenType.REPLACE,
 587        "RESPECT NULLS": TokenType.RESPECT_NULLS,
 588        "REFERENCES": TokenType.REFERENCES,
 589        "RIGHT": TokenType.RIGHT,
 590        "RLIKE": TokenType.RLIKE,
 591        "ROLLBACK": TokenType.ROLLBACK,
 592        "ROLLUP": TokenType.ROLLUP,
 593        "ROW": TokenType.ROW,
 594        "ROWS": TokenType.ROWS,
 595        "SCHEMA": TokenType.SCHEMA,
 596        "SEED": TokenType.SEED,
 597        "SELECT": TokenType.SELECT,
 598        "SEMI": TokenType.SEMI,
 599        "SET": TokenType.SET,
 600        "SHOW": TokenType.SHOW,
 601        "SIMILAR TO": TokenType.SIMILAR_TO,
 602        "SOME": TokenType.SOME,
 603        "SORTKEY": TokenType.SORTKEY,
 604        "SORT BY": TokenType.SORT_BY,
 605        "TABLE": TokenType.TABLE,
 606        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 607        "TEMP": TokenType.TEMPORARY,
 608        "TEMPORARY": TokenType.TEMPORARY,
 609        "THEN": TokenType.THEN,
 610        "TRUE": TokenType.TRUE,
 611        "TRAILING": TokenType.TRAILING,
 612        "UNBOUNDED": TokenType.UNBOUNDED,
 613        "UNION": TokenType.UNION,
 614        "UNLOGGED": TokenType.UNLOGGED,
 615        "UNNEST": TokenType.UNNEST,
 616        "UNPIVOT": TokenType.UNPIVOT,
 617        "UPDATE": TokenType.UPDATE,
 618        "USE": TokenType.USE,
 619        "USING": TokenType.USING,
 620        "VALUES": TokenType.VALUES,
 621        "VIEW": TokenType.VIEW,
 622        "VOLATILE": TokenType.VOLATILE,
 623        "WHEN": TokenType.WHEN,
 624        "WHERE": TokenType.WHERE,
 625        "WINDOW": TokenType.WINDOW,
 626        "WITH": TokenType.WITH,
 627        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
 628        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
 629        "WITHIN GROUP": TokenType.WITHIN_GROUP,
 630        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
 631        "APPLY": TokenType.APPLY,
 632        "ARRAY": TokenType.ARRAY,
 633        "BOOL": TokenType.BOOLEAN,
 634        "BOOLEAN": TokenType.BOOLEAN,
 635        "BYTE": TokenType.TINYINT,
 636        "TINYINT": TokenType.TINYINT,
 637        "SHORT": TokenType.SMALLINT,
 638        "SMALLINT": TokenType.SMALLINT,
 639        "INT2": TokenType.SMALLINT,
 640        "INTEGER": TokenType.INT,
 641        "INT": TokenType.INT,
 642        "INT4": TokenType.INT,
 643        "LONG": TokenType.BIGINT,
 644        "BIGINT": TokenType.BIGINT,
 645        "INT8": TokenType.BIGINT,
 646        "DECIMAL": TokenType.DECIMAL,
 647        "MAP": TokenType.MAP,
 648        "NULLABLE": TokenType.NULLABLE,
 649        "NUMBER": TokenType.DECIMAL,
 650        "NUMERIC": TokenType.DECIMAL,
 651        "FIXED": TokenType.DECIMAL,
 652        "REAL": TokenType.FLOAT,
 653        "FLOAT": TokenType.FLOAT,
 654        "FLOAT4": TokenType.FLOAT,
 655        "FLOAT8": TokenType.DOUBLE,
 656        "DOUBLE": TokenType.DOUBLE,
 657        "DOUBLE PRECISION": TokenType.DOUBLE,
 658        "JSON": TokenType.JSON,
 659        "CHAR": TokenType.CHAR,
 660        "NCHAR": TokenType.NCHAR,
 661        "VARCHAR": TokenType.VARCHAR,
 662        "VARCHAR2": TokenType.VARCHAR,
 663        "NVARCHAR": TokenType.NVARCHAR,
 664        "NVARCHAR2": TokenType.NVARCHAR,
 665        "STR": TokenType.TEXT,
 666        "STRING": TokenType.TEXT,
 667        "TEXT": TokenType.TEXT,
 668        "CLOB": TokenType.TEXT,
 669        "LONGVARCHAR": TokenType.TEXT,
 670        "BINARY": TokenType.BINARY,
 671        "BLOB": TokenType.VARBINARY,
 672        "BYTEA": TokenType.VARBINARY,
 673        "VARBINARY": TokenType.VARBINARY,
 674        "TIME": TokenType.TIME,
 675        "TIMESTAMP": TokenType.TIMESTAMP,
 676        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 677        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 678        "DATE": TokenType.DATE,
 679        "DATETIME": TokenType.DATETIME,
 680        "UNIQUE": TokenType.UNIQUE,
 681        "STRUCT": TokenType.STRUCT,
 682        "VARIANT": TokenType.VARIANT,
 683        "ALTER": TokenType.ALTER,
 684        "ALTER AGGREGATE": TokenType.COMMAND,
 685        "ALTER DEFAULT": TokenType.COMMAND,
 686        "ALTER DOMAIN": TokenType.COMMAND,
 687        "ALTER ROLE": TokenType.COMMAND,
 688        "ALTER RULE": TokenType.COMMAND,
 689        "ALTER SEQUENCE": TokenType.COMMAND,
 690        "ALTER TYPE": TokenType.COMMAND,
 691        "ALTER USER": TokenType.COMMAND,
 692        "ALTER VIEW": TokenType.COMMAND,
 693        "ANALYZE": TokenType.COMMAND,
 694        "CALL": TokenType.COMMAND,
 695        "COPY": TokenType.COMMAND,
 696        "EXPLAIN": TokenType.COMMAND,
 697        "OPTIMIZE": TokenType.COMMAND,
 698        "PREPARE": TokenType.COMMAND,
 699        "TRUNCATE": TokenType.COMMAND,
 700        "VACUUM": TokenType.COMMAND,
 701    }
 702
 703    WHITE_SPACE = {
 704        " ": TokenType.SPACE,
 705        "\t": TokenType.SPACE,
 706        "\n": TokenType.BREAK,
 707        "\r": TokenType.BREAK,
 708        "\r\n": TokenType.BREAK,
 709    }
 710
 711    COMMANDS = {
 712        TokenType.COMMAND,
 713        TokenType.EXECUTE,
 714        TokenType.FETCH,
 715        TokenType.SET,
 716        TokenType.SHOW,
 717    }
 718
 719    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 720
 721    # handle numeric literals like in hive (3L = BIGINT)
 722    NUMERIC_LITERALS: t.Dict[str, str] = {}
 723    ENCODE: t.Optional[str] = None
 724
 725    COMMENTS = ["--", ("/*", "*/")]
 726    KEYWORD_TRIE = None  # autofilled
 727
 728    IDENTIFIER_CAN_START_WITH_DIGIT = False
 729
 730    __slots__ = (
 731        "sql",
 732        "size",
 733        "tokens",
 734        "_start",
 735        "_current",
 736        "_line",
 737        "_col",
 738        "_comments",
 739        "_char",
 740        "_end",
 741        "_peek",
 742        "_prev_token_line",
 743        "_prev_token_comments",
 744        "_prev_token_type",
 745    )
 746
 747    def __init__(self) -> None:
 748        self.reset()
 749
 750    def reset(self) -> None:
 751        self.sql = ""
 752        self.size = 0
 753        self.tokens: t.List[Token] = []
 754        self._start = 0
 755        self._current = 0
 756        self._line = 1
 757        self._col = 1
 758        self._comments: t.List[str] = []
 759
 760        self._char = None
 761        self._end = None
 762        self._peek = None
 763        self._prev_token_line = -1
 764        self._prev_token_comments: t.List[str] = []
 765        self._prev_token_type = None
 766
 767    def tokenize(self, sql: str) -> t.List[Token]:
 768        """Returns a list of tokens corresponding to the SQL string `sql`."""
 769        self.reset()
 770        self.sql = sql
 771        self.size = len(sql)
 772        self._scan()
 773        return self.tokens
 774
 775    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 776        while self.size and not self._end:
 777            self._start = self._current
 778            self._advance()
 779
 780            if not self._char:
 781                break
 782
 783            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
 784            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore
 785
 786            if white_space:
 787                if white_space == TokenType.BREAK:
 788                    self._col = 1
 789                    self._line += 1
 790            elif self._char.isdigit():  # type:ignore
 791                self._scan_number()
 792            elif identifier_end:
 793                self._scan_identifier(identifier_end)
 794            else:
 795                self._scan_keywords()
 796
 797            if until and until():
 798                break
 799
 800    def _chars(self, size: int) -> str:
 801        if size == 1:
 802            return self._char  # type: ignore
 803        start = self._current - 1
 804        end = start + size
 805        if end <= self.size:
 806            return self.sql[start:end]
 807        return ""
 808
 809    def _advance(self, i: int = 1) -> None:
 810        self._col += i
 811        self._current += i
 812        self._end = self._current >= self.size  # type: ignore
 813        self._char = self.sql[self._current - 1]  # type: ignore
 814        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore
 815
 816    @property
 817    def _text(self) -> str:
 818        return self.sql[self._start : self._current]
 819
 820    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 821        self._prev_token_line = self._line
 822        self._prev_token_comments = self._comments
 823        self._prev_token_type = token_type  # type: ignore
 824        self.tokens.append(
 825            Token(
 826                token_type,
 827                self._text if text is None else text,
 828                self._line,
 829                self._col,
 830                self._comments,
 831            )
 832        )
 833        self._comments = []
 834
 835        # If we have either a semicolon or a begin token before the command's token, we'll parse
 836        # whatever follows the command's token as a string
 837        if token_type in self.COMMANDS and (
 838            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
 839        ):
 840            start = self._current
 841            tokens = len(self.tokens)
 842            self._scan(lambda: self._peek == ";")
 843            self.tokens = self.tokens[:tokens]
 844            text = self.sql[start : self._current].strip()
 845            if text:
 846                self._add(TokenType.STRING, text)
 847
 848    def _scan_keywords(self) -> None:
 849        size = 0
 850        word = None
 851        chars: t.Optional[str] = self._text
 852        char = chars
 853        prev_space = False
 854        skip = False
 855        trie = self.KEYWORD_TRIE
 856
 857        while chars:
 858            if skip:
 859                result = 1
 860            else:
 861                result, trie = in_trie(trie, char.upper())  # type: ignore
 862
 863            if result == 0:
 864                break
 865            if result == 2:
 866                word = chars
 867            size += 1
 868            end = self._current - 1 + size
 869
 870            if end < self.size:
 871                char = self.sql[end]
 872                is_space = char in self.WHITE_SPACE
 873
 874                if not is_space or not prev_space:
 875                    if is_space:
 876                        char = " "
 877                    chars += char
 878                    prev_space = is_space
 879                    skip = False
 880                else:
 881                    skip = True
 882            else:
 883                chars = None
 884
 885        if not word:
 886            if self._char in self.SINGLE_TOKENS:
 887                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
 888                return
 889            self._scan_var()
 890            return
 891
 892        if self._scan_string(word):
 893            return
 894        if self._scan_formatted_string(word):
 895            return
 896        if self._scan_comment(word):
 897            return
 898
 899        self._advance(size - 1)
 900        self._add(self.KEYWORDS[word.upper()])
 901
 902    def _scan_comment(self, comment_start: str) -> bool:
 903        if comment_start not in self._COMMENTS:  # type: ignore
 904            return False
 905
 906        comment_start_line = self._line
 907        comment_start_size = len(comment_start)
 908        comment_end = self._COMMENTS[comment_start]  # type: ignore
 909
 910        if comment_end:
 911            comment_end_size = len(comment_end)
 912
 913            while not self._end and self._chars(comment_end_size) != comment_end:
 914                self._advance()
 915
 916            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
 917            self._advance(comment_end_size - 1)
 918        else:
 919            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
 920                self._advance()
 921            self._comments.append(self._text[comment_start_size:])  # type: ignore
 922
 923        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
 924        # Multiple consecutive comments are preserved by appending them to the current comments list.
 925        if comment_start_line == self._prev_token_line:
 926            self.tokens[-1].comments.extend(self._comments)
 927            self._comments = []
 928
 929        return True
 930
 931    def _scan_number(self) -> None:
 932        if self._char == "0":
 933            peek = self._peek.upper()  # type: ignore
 934            if peek == "B":
 935                return self._scan_bits()
 936            elif peek == "X":
 937                return self._scan_hex()
 938
 939        decimal = False
 940        scientific = 0
 941
 942        while True:
 943            if self._peek.isdigit():  # type: ignore
 944                self._advance()
 945            elif self._peek == "." and not decimal:
 946                decimal = True
 947                self._advance()
 948            elif self._peek in ("-", "+") and scientific == 1:
 949                scientific += 1
 950                self._advance()
 951            elif self._peek.upper() == "E" and not scientific:  # type: ignore
 952                scientific += 1
 953                self._advance()
 954            elif self._peek.isidentifier():  # type: ignore
 955                number_text = self._text
 956                literal = []
 957
 958                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
 959                    literal.append(self._peek.upper())  # type: ignore
 960                    self._advance()
 961
 962                literal = "".join(literal)  # type: ignore
 963                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore
 964
 965                if token_type:
 966                    self._add(TokenType.NUMBER, number_text)
 967                    self._add(TokenType.DCOLON, "::")
 968                    return self._add(token_type, literal)  # type: ignore
 969                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
 970                    return self._add(TokenType.VAR)
 971
 972                self._add(TokenType.NUMBER, number_text)
 973                return self._advance(-len(literal))
 974            else:
 975                return self._add(TokenType.NUMBER)
 976
 977    def _scan_bits(self) -> None:
 978        self._advance()
 979        value = self._extract_value()
 980        try:
 981            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
 982        except ValueError:
 983            self._add(TokenType.IDENTIFIER)
 984
 985    def _scan_hex(self) -> None:
 986        self._advance()
 987        value = self._extract_value()
 988        try:
 989            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
 990        except ValueError:
 991            self._add(TokenType.IDENTIFIER)
 992
 993    def _extract_value(self) -> str:
 994        while True:
 995            char = self._peek.strip()  # type: ignore
 996            if char and char not in self.SINGLE_TOKENS:
 997                self._advance()
 998            else:
 999                break
1000
1001        return self._text
1002
1003    def _scan_string(self, quote: str) -> bool:
1004        quote_end = self._QUOTES.get(quote)  # type: ignore
1005        if quote_end is None:
1006            return False
1007
1008        self._advance(len(quote))
1009        text = self._extract_string(quote_end)
1010        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
1011        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
1012        return True
1013
1014    # X'1234', b'0110', E'\\\\\' etc.
1015    def _scan_formatted_string(self, string_start: str) -> bool:
1016        if string_start in self._HEX_STRINGS:  # type: ignore
1017            delimiters = self._HEX_STRINGS  # type: ignore
1018            token_type = TokenType.HEX_STRING
1019            base = 16
1020        elif string_start in self._BIT_STRINGS:  # type: ignore
1021            delimiters = self._BIT_STRINGS  # type: ignore
1022            token_type = TokenType.BIT_STRING
1023            base = 2
1024        elif string_start in self._BYTE_STRINGS:  # type: ignore
1025            delimiters = self._BYTE_STRINGS  # type: ignore
1026            token_type = TokenType.BYTE_STRING
1027            base = None
1028        else:
1029            return False
1030
1031        self._advance(len(string_start))
1032        string_end = delimiters.get(string_start)
1033        text = self._extract_string(string_end)
1034
1035        if base is None:
1036            self._add(token_type, text)
1037        else:
1038            try:
1039                self._add(token_type, f"{int(text, base)}")
1040            except:
1041                raise RuntimeError(
1042                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1043                )
1044
1045        return True
1046
1047    def _scan_identifier(self, identifier_end: str) -> None:
1048        text = ""
1049        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES
1050
1051        while True:
1052            if self._end:
1053                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")
1054
1055            self._advance()
1056            if self._char == identifier_end:
1057                if identifier_end_is_escape and self._peek == identifier_end:
1058                    text += identifier_end  # type: ignore
1059                    self._advance()
1060                    continue
1061
1062                break
1063
1064            text += self._char  # type: ignore
1065
1066        self._add(TokenType.IDENTIFIER, text)
1067
1068    def _scan_var(self) -> None:
1069        while True:
1070            char = self._peek.strip()  # type: ignore
1071            if char and char not in self.SINGLE_TOKENS:
1072                self._advance()
1073            else:
1074                break
1075        self._add(
1076            TokenType.VAR
1077            if self._prev_token_type == TokenType.PARAMETER
1078            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1079        )
1080
1081    def _extract_string(self, delimiter: str) -> str:
1082        text = ""
1083        delim_size = len(delimiter)
1084
1085        while True:
1086            if self._char in self._STRING_ESCAPES and (
1087                self._peek == delimiter or self._peek in self._STRING_ESCAPES
1088            ):
1089                if self._peek == delimiter:
1090                    text += self._peek  # type: ignore
1091                else:
1092                    text += self._char + self._peek  # type: ignore
1093
1094                if self._current + 1 < self.size:
1095                    self._advance(2)
1096                else:
1097                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1098            else:
1099                if self._chars(delim_size) == delimiter:
1100                    if delim_size > 1:
1101                        self._advance(delim_size - 1)
1102                    break
1103
1104                if self._end:
1105                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1106                text += self._char  # type: ignore
1107                self._advance()
1108
1109        return text
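Dialect tokenizers customize this class by overriding class-level attributes; the _Tokenizer metaclass then rebuilds the derived lookups (such as KEYWORD_TRIE, which is autofilled) for each subclass. A hypothetical sketch, not an actual sqlglot dialect:

    from sqlglot.tokens import Tokenizer, TokenType

    class MyTokenizer(Tokenizer):
        # Also accept backtick-quoted identifiers.
        IDENTIFIERS = ['"', "`"]
        # Map an extra keyword onto an existing token type.
        KEYWORDS = {**Tokenizer.KEYWORDS, "MINUS": TokenType.EXCEPT}

    tokens = MyTokenizer().tokenize("SELECT `a` FROM t MINUS SELECT b FROM u")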
Tokenizer()
747    def __init__(self) -> None:
748        self.reset()
def reset(self) -> None:
750    def reset(self) -> None:
751        self.sql = ""
752        self.size = 0
753        self.tokens: t.List[Token] = []
754        self._start = 0
755        self._current = 0
756        self._line = 1
757        self._col = 1
758        self._comments: t.List[str] = []
759
760        self._char = None
761        self._end = None
762        self._peek = None
763        self._prev_token_line = -1
764        self._prev_token_comments: t.List[str] = []
765        self._prev_token_type = None
def tokenize(self, sql: str) -> List[sqlglot.tokens.Token]:
767    def tokenize(self, sql: str) -> t.List[Token]:
768        """Returns a list of tokens corresponding to the SQL string `sql`."""
769        self.reset()
770        self.sql = sql
771        self.size = len(sql)
772        self._scan()
773        return self.tokens

Returns a list of tokens corresponding to the SQL string sql.
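For illustration, a minimal end-to-end sketch. Multi-word keywords such as ORDER BY come back as a single token thanks to the keyword trie, and a leading command token (see COMMANDS above) swallows the rest of the statement as a single STRING:

    from sqlglot.tokens import Tokenizer

    tokens = Tokenizer().tokenize("SELECT a FROM t ORDER BY b")
    print([(token.token_type.name, token.text) for token in tokens])
    # [('SELECT', 'SELECT'), ('VAR', 'a'), ('FROM', 'FROM'),
    #  ('VAR', 't'), ('ORDER_BY', 'ORDER BY'), ('VAR', 'b')]

    tokens = Tokenizer().tokenize("SHOW TABLES")
    print([(token.token_type.name, token.text) for token in tokens])
    # [('SHOW', 'SHOW'), ('STRING', 'TABLES')]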