sqlglot.tokens
from __future__ import annotations

import typing as t
from enum import auto

from sqlglot.helper import AutoName
from sqlglot.trie import in_trie, new_trie


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    NATIONAL = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()

    # types
    BOOLEAN = auto()
    TINYINT = auto()
    SMALLINT = auto()
    INT = auto()
    BIGINT = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    DATETIME = auto()
    DATE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    UNIQUEIDENTIFIER = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AT_TIME_ZONE = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    BOTH = auto()
    BUCKET = auto()
    BY_DEFAULT = auto()
    CACHE = auto()
    CASCADE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    COMPOUND = auto()
    CONSTRAINT = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_ROW = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DISTINCT = auto()
    DISTINCT_FROM = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOLLOWING = auto()
    FOR = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IF = auto()
    IGNORE_NULLS = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LAZY = auto()
    LEADING = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LOAD_DATA = auto()
    LOCAL = auto()
    MAP = auto()
    MATCH_RECOGNIZE = auto()
    MATERIALIZED = auto()
    MERGE = auto()
    MOD = auto()
    NATURAL = auto()
    NEXT = auto()
    NO_ACTION = auto()
    NOTNULL = auto()
    NULL = auto()
    NULLS_FIRST = auto()
    NULLS_LAST = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPTIONS = auto()
    ORDER_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OUT_OF = auto()
    OVER = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    PRECEDING = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REPLACE = auto()
    RESPECT_NULLS = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SEED = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORTKEY = auto()
    SORT_BY = auto()
    STRUCT = auto()
    TABLE_SAMPLE = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRAILING = auto()
    TRUE = auto()
    UNBOUNDED = auto()
    UNCACHE = auto()
    UNION = auto()
    UNLOGGED = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    WITH_TIME_ZONE = auto()
    WITH_LOCAL_TIME_ZONE = auto()
    WITHIN_GROUP = auto()
    WITHOUT_TIME_ZONE = auto()
    UNIQUE = auto()


class Token:
    __slots__ = ("token_type", "text", "line", "col", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        comments: t.List[str] = [],
    ) -> None:
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = max(col - len(text), 1)
        self.comments = comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"


class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):  # type: ignore
        klass = super().__new__(cls, clsname, bases, attrs)

        klass._QUOTES = {
            f"{prefix}{s}": e
            for s, e in cls._delimeter_list_to_dict(klass.QUOTES).items()
            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
        }
        klass._BIT_STRINGS = cls._delimeter_list_to_dict(klass.BIT_STRINGS)
        klass._HEX_STRINGS = cls._delimeter_list_to_dict(klass.HEX_STRINGS)
        klass._BYTE_STRINGS = cls._delimeter_list_to_dict(klass.BYTE_STRINGS)
        klass._IDENTIFIERS = cls._delimeter_list_to_dict(klass.IDENTIFIERS)
        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = dict(
            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
            for comment in klass.COMMENTS
        )

        klass.KEYWORD_TRIE = new_trie(
            key.upper()
            for key in {
                **klass.KEYWORDS,
                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
            }
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        return klass

    @staticmethod
    def _delimeter_list_to_dict(list: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in list)


class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []

    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []

    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []

    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']

    STRING_ESCAPES = ["'"]

    _STRING_ESCAPES: t.Set[str] = set()

    IDENTIFIER_ESCAPES = ['"']

    _IDENTIFIER_ESCAPES: t.Set[str] = set()

    KEYWORDS = {
        **{
            f"{key}{postfix}": TokenType.BLOCK_START
            for key in ("{%", "{#")
            for postfix in ("", "+", "-")
        },
        **{
            f"{prefix}{key}": TokenType.BLOCK_END
            for key in ("%}", "#}")
            for prefix in ("", "+", "-")
        },
        "{{+": TokenType.BLOCK_START,
        "{{-": TokenType.BLOCK_START,
        "+}}": TokenType.BLOCK_END,
        "-}}": TokenType.BLOCK_END,
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "BOTH": TokenType.BOTH,
        "BUCKET": TokenType.BUCKET,
        "BY DEFAULT": TokenType.BY_DEFAULT,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CASCADE": TokenType.CASCADE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "COMPOUND": TokenType.COMPOUND,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT ROW": TokenType.CURRENT_ROW,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTINCT FROM": TokenType.DISTINCT_FROM,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOLLOWING": TokenType.FOLLOWING,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "IF": TokenType.IF,
        "ILIKE": TokenType.ILIKE,
        "IGNORE NULLS": TokenType.IGNORE_NULLS,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "LATERAL": TokenType.LATERAL,
        "LAZY": TokenType.LAZY,
        "LEADING": TokenType.LEADING,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD DATA": TokenType.LOAD_DATA,
        "LOCAL": TokenType.LOCAL,
        "MATERIALIZED": TokenType.MATERIALIZED,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NO ACTION": TokenType.NO_ACTION,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "NULLS FIRST": TokenType.NULLS_FIRST,
        "NULLS LAST": TokenType.NULLS_LAST,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "ONLY": TokenType.ONLY,
        "OPTIONS": TokenType.OPTIONS,
        "OR": TokenType.OR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OUT OF": TokenType.OUT_OF,
        "OVER": TokenType.OVER,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRECEDING": TokenType.PRECEDING,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RESPECT NULLS": TokenType.RESPECT_NULLS,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SEED": TokenType.SEED,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORTKEY": TokenType.SORTKEY,
        "SORT BY": TokenType.SORT_BY,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRAILING": TokenType.TRAILING,
        "UNBOUNDED": TokenType.UNBOUNDED,
        "UNION": TokenType.UNION,
        "UNLOGGED": TokenType.UNLOGGED,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
        "WITHIN GROUP": TokenType.WITHIN_GROUP,
        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.BIGINT,
        "DECIMAL": TokenType.DECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ALTER AGGREGATE": TokenType.COMMAND,
        "ALTER DEFAULT": TokenType.COMMAND,
        "ALTER DOMAIN": TokenType.COMMAND,
        "ALTER ROLE": TokenType.COMMAND,
        "ALTER RULE": TokenType.COMMAND,
        "ALTER SEQUENCE": TokenType.COMMAND,
        "ALTER TYPE": TokenType.COMMAND,
        "ALTER USER": TokenType.COMMAND,
        "ALTER VIEW": TokenType.COMMAND,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
    }

    WHITE_SPACE = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
        "\r\n": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SET,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}
    ENCODE: t.Optional[str] = None

    COMMENTS = ["--", ("/*", "*/")]
    KEYWORD_TRIE = None  # autofilled

    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_prev_token_comments",
        "_prev_token_type",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = None
        self._end = None
        self._peek = None
        self._prev_token_line = -1
        self._prev_token_comments: t.List[str] = []
        self._prev_token_type = None

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        self._scan()
        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if not self._char:
                break

            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore

            if white_space:
                if white_space == TokenType.BREAK:
                    self._col = 1
                    self._line += 1
            elif self._char.isdigit():  # type: ignore
                self._scan_number()
            elif identifier_end:
                self._scan_identifier(identifier_end)
            else:
                self._scan_keywords()

            if until and until():
                break

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char  # type: ignore
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _advance(self, i: int = 1) -> None:
        self._col += i
        self._current += i
        self._end = self._current >= self.size  # type: ignore
        self._char = self.sql[self._current - 1]  # type: ignore
        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self._prev_token_comments = self._comments
        self._prev_token_type = token_type  # type: ignore
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if token_type in self.COMMANDS and (
            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars: t.Optional[str] = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())  # type: ignore

            if result == 0:
                break
            if result == 2:
                word = chars
            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                chars = None

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        self._add(self.KEYWORDS[word.upper()])

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:  # type: ignore
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]  # type: ignore

        if comment_end:
            comment_end_size = len(comment_end)

            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance()

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
                self._advance()
            self._comments.append(self._text[comment_start_size:])  # type: ignore

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()  # type: ignore
            if peek == "B":
                return self._scan_bits()
            elif peek == "X":
                return self._scan_hex()

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():  # type: ignore
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:  # type: ignore
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():  # type: ignore
                number_text = self._text
                literal = []

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
                    literal.append(self._peek.upper())  # type: ignore
                    self._advance()

                literal = "".join(literal)  # type: ignore
                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)  # type: ignore
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)  # type: ignore
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:  # type: ignore
            delimiters = self._HEX_STRINGS  # type: ignore
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:  # type: ignore
            delimiters = self._BIT_STRINGS  # type: ignore
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:  # type: ignore
            delimiters = self._BYTE_STRINGS  # type: ignore
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters.get(string_start)
        text = self._extract_string(string_end)

        if base is None:
            self._add(token_type, text)
        else:
            try:
                self._add(token_type, f"{int(text, base)}")
            except:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        text = ""
        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES

        while True:
            if self._end:
                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")

            self._advance()
            if self._char == identifier_end:
                if identifier_end_is_escape and self._peek == identifier_end:
                    text += identifier_end  # type: ignore
                    self._advance()
                    continue

                break

            text += self._char  # type: ignore

        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break
        self._add(
            TokenType.VAR
            if self._prev_token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str) -> str:
        text = ""
        delim_size = len(delimiter)

        while True:
            if self._char in self._STRING_ESCAPES and (
                self._peek == delimiter or self._peek in self._STRING_ESCAPES
            ):
                if self._peek == delimiter:
                    text += self._peek  # type: ignore
                else:
                    text += self._char + self._peek  # type: ignore

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
                text += self._char  # type: ignore
                self._advance()

        return text
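The keyword scan above is driven by the trie helpers imported from sqlglot.trie. A minimal sketch of the longest-match loop that _scan_keywords performs, assuming in_trie returns 0 for no match, 1 for a keyword prefix, and 2 for a complete keyword, as its use in the source implies:

from sqlglot.trie import in_trie, new_trie

# Feed one character at a time, threading the returned trie node, the way
# _scan_keywords does; multi-word keywords like "GROUP BY" match as a unit.
node = new_trie(["GROUP BY", "GROUPING SETS"])
result = 0
for char in "GROUP BY":
    result, node = in_trie(node, char)

print(result)  # 2 -> "GROUP BY" is a complete keyword in this trie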
class TokenType(AutoName):
An enumeration of the token types the tokenizer can emit.
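Because TokenType derives from AutoName, each member's value is generated from its name. A small illustrative check (not part of the documented API surface):

from sqlglot.tokens import TokenType

assert TokenType.SELECT.value == "SELECT"       # AutoName: value mirrors the name
assert TokenType["DCOLON"] is TokenType.DCOLON  # standard Enum lookup by name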
class Token:
Token(token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, comments: List[str] = [])
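Note the col handling in __init__: the col argument is read as the 1-based column just past the token's text, and the stored col is shifted back to the token's first character (floored at 1). A small check, assuming that reading of the arithmetic:

from sqlglot.tokens import Token, TokenType

# "foo" spans columns 1-3; the tokenizer passes col=4 (one past the end).
tok = Token(TokenType.VAR, "foo", line=1, col=4)
assert tok.col == 1  # max(4 - len("foo"), 1)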
number(number: int) -> Token
    Returns a NUMBER token with `number` as its text.

string(string: str) -> Token
    Returns a STRING token with `string` as its text.

identifier(identifier: str) -> Token
    Returns an IDENTIFIER token with `identifier` as its text.

var(var: str) -> Token
    Returns a VAR token with `var` as its text.
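A short sketch using the constructors above:

from sqlglot.tokens import Token, TokenType

num = Token.number(42)
assert num.token_type is TokenType.NUMBER and num.text == "42"
print(Token.identifier("col1"))  # repr lists token_type, text, line, col, comments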
class Tokenizer:
386class Tokenizer(metaclass=_Tokenizer): 387 SINGLE_TOKENS = { 388 "(": TokenType.L_PAREN, 389 ")": TokenType.R_PAREN, 390 "[": TokenType.L_BRACKET, 391 "]": TokenType.R_BRACKET, 392 "{": TokenType.L_BRACE, 393 "}": TokenType.R_BRACE, 394 "&": TokenType.AMP, 395 "^": TokenType.CARET, 396 ":": TokenType.COLON, 397 ",": TokenType.COMMA, 398 ".": TokenType.DOT, 399 "-": TokenType.DASH, 400 "=": TokenType.EQ, 401 ">": TokenType.GT, 402 "<": TokenType.LT, 403 "%": TokenType.MOD, 404 "!": TokenType.NOT, 405 "|": TokenType.PIPE, 406 "+": TokenType.PLUS, 407 ";": TokenType.SEMICOLON, 408 "/": TokenType.SLASH, 409 "\\": TokenType.BACKSLASH, 410 "*": TokenType.STAR, 411 "~": TokenType.TILDA, 412 "?": TokenType.PLACEHOLDER, 413 "@": TokenType.PARAMETER, 414 # used for breaking a var like x'y' but nothing else 415 # the token type doesn't matter 416 "'": TokenType.QUOTE, 417 "`": TokenType.IDENTIFIER, 418 '"': TokenType.IDENTIFIER, 419 "#": TokenType.HASH, 420 } 421 422 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] 423 424 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] 425 426 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] 427 428 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] 429 430 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] 431 432 STRING_ESCAPES = ["'"] 433 434 _STRING_ESCAPES: t.Set[str] = set() 435 436 IDENTIFIER_ESCAPES = ['"'] 437 438 _IDENTIFIER_ESCAPES: t.Set[str] = set() 439 440 KEYWORDS = { 441 **{ 442 f"{key}{postfix}": TokenType.BLOCK_START 443 for key in ("{%", "{#") 444 for postfix in ("", "+", "-") 445 }, 446 **{ 447 f"{prefix}{key}": TokenType.BLOCK_END 448 for key in ("%}", "#}") 449 for prefix in ("", "+", "-") 450 }, 451 "{{+": TokenType.BLOCK_START, 452 "{{-": TokenType.BLOCK_START, 453 "+}}": TokenType.BLOCK_END, 454 "-}}": TokenType.BLOCK_END, 455 "/*+": TokenType.HINT, 456 "==": TokenType.EQ, 457 "::": TokenType.DCOLON, 458 "||": TokenType.DPIPE, 459 ">=": TokenType.GTE, 460 "<=": TokenType.LTE, 461 "<>": TokenType.NEQ, 462 "!=": TokenType.NEQ, 463 "<=>": TokenType.NULLSAFE_EQ, 464 "->": TokenType.ARROW, 465 "->>": TokenType.DARROW, 466 "=>": TokenType.FARROW, 467 "#>": TokenType.HASH_ARROW, 468 "#>>": TokenType.DHASH_ARROW, 469 "<->": TokenType.LR_ARROW, 470 "ALL": TokenType.ALL, 471 "ALWAYS": TokenType.ALWAYS, 472 "AND": TokenType.AND, 473 "ANTI": TokenType.ANTI, 474 "ANY": TokenType.ANY, 475 "ASC": TokenType.ASC, 476 "AS": TokenType.ALIAS, 477 "AT TIME ZONE": TokenType.AT_TIME_ZONE, 478 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, 479 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, 480 "BEGIN": TokenType.BEGIN, 481 "BETWEEN": TokenType.BETWEEN, 482 "BOTH": TokenType.BOTH, 483 "BUCKET": TokenType.BUCKET, 484 "BY DEFAULT": TokenType.BY_DEFAULT, 485 "CACHE": TokenType.CACHE, 486 "UNCACHE": TokenType.UNCACHE, 487 "CASE": TokenType.CASE, 488 "CASCADE": TokenType.CASCADE, 489 "CHARACTER SET": TokenType.CHARACTER_SET, 490 "CLUSTER BY": TokenType.CLUSTER_BY, 491 "COLLATE": TokenType.COLLATE, 492 "COLUMN": TokenType.COLUMN, 493 "COMMIT": TokenType.COMMIT, 494 "COMPOUND": TokenType.COMPOUND, 495 "CONSTRAINT": TokenType.CONSTRAINT, 496 "CREATE": TokenType.CREATE, 497 "CROSS": TokenType.CROSS, 498 "CUBE": TokenType.CUBE, 499 "CURRENT_DATE": TokenType.CURRENT_DATE, 500 "CURRENT ROW": TokenType.CURRENT_ROW, 501 "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, 502 "DEFAULT": TokenType.DEFAULT, 503 "DELETE": TokenType.DELETE, 504 "DESC": TokenType.DESC, 505 "DESCRIBE": TokenType.DESCRIBE, 506 "DISTINCT": TokenType.DISTINCT, 507 "DISTINCT FROM": TokenType.DISTINCT_FROM, 
508 "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY, 509 "DIV": TokenType.DIV, 510 "DROP": TokenType.DROP, 511 "ELSE": TokenType.ELSE, 512 "END": TokenType.END, 513 "ESCAPE": TokenType.ESCAPE, 514 "EXCEPT": TokenType.EXCEPT, 515 "EXECUTE": TokenType.EXECUTE, 516 "EXISTS": TokenType.EXISTS, 517 "FALSE": TokenType.FALSE, 518 "FETCH": TokenType.FETCH, 519 "FILTER": TokenType.FILTER, 520 "FIRST": TokenType.FIRST, 521 "FULL": TokenType.FULL, 522 "FUNCTION": TokenType.FUNCTION, 523 "FOLLOWING": TokenType.FOLLOWING, 524 "FOR": TokenType.FOR, 525 "FOREIGN KEY": TokenType.FOREIGN_KEY, 526 "FORMAT": TokenType.FORMAT, 527 "FROM": TokenType.FROM, 528 "GLOB": TokenType.GLOB, 529 "GROUP BY": TokenType.GROUP_BY, 530 "GROUPING SETS": TokenType.GROUPING_SETS, 531 "HAVING": TokenType.HAVING, 532 "IF": TokenType.IF, 533 "ILIKE": TokenType.ILIKE, 534 "IGNORE NULLS": TokenType.IGNORE_NULLS, 535 "IN": TokenType.IN, 536 "INDEX": TokenType.INDEX, 537 "INNER": TokenType.INNER, 538 "INSERT": TokenType.INSERT, 539 "INTERVAL": TokenType.INTERVAL, 540 "INTERSECT": TokenType.INTERSECT, 541 "INTO": TokenType.INTO, 542 "IS": TokenType.IS, 543 "ISNULL": TokenType.ISNULL, 544 "JOIN": TokenType.JOIN, 545 "LATERAL": TokenType.LATERAL, 546 "LAZY": TokenType.LAZY, 547 "LEADING": TokenType.LEADING, 548 "LEFT": TokenType.LEFT, 549 "LIKE": TokenType.LIKE, 550 "LIMIT": TokenType.LIMIT, 551 "LOAD DATA": TokenType.LOAD_DATA, 552 "LOCAL": TokenType.LOCAL, 553 "MATERIALIZED": TokenType.MATERIALIZED, 554 "MERGE": TokenType.MERGE, 555 "NATURAL": TokenType.NATURAL, 556 "NEXT": TokenType.NEXT, 557 "NO ACTION": TokenType.NO_ACTION, 558 "NOT": TokenType.NOT, 559 "NOTNULL": TokenType.NOTNULL, 560 "NULL": TokenType.NULL, 561 "NULLS FIRST": TokenType.NULLS_FIRST, 562 "NULLS LAST": TokenType.NULLS_LAST, 563 "OBJECT": TokenType.OBJECT, 564 "OFFSET": TokenType.OFFSET, 565 "ON": TokenType.ON, 566 "ONLY": TokenType.ONLY, 567 "OPTIONS": TokenType.OPTIONS, 568 "OR": TokenType.OR, 569 "ORDER BY": TokenType.ORDER_BY, 570 "ORDINALITY": TokenType.ORDINALITY, 571 "OUTER": TokenType.OUTER, 572 "OUT OF": TokenType.OUT_OF, 573 "OVER": TokenType.OVER, 574 "OVERWRITE": TokenType.OVERWRITE, 575 "PARTITION": TokenType.PARTITION, 576 "PARTITION BY": TokenType.PARTITION_BY, 577 "PARTITIONED BY": TokenType.PARTITION_BY, 578 "PARTITIONED_BY": TokenType.PARTITION_BY, 579 "PERCENT": TokenType.PERCENT, 580 "PIVOT": TokenType.PIVOT, 581 "PRECEDING": TokenType.PRECEDING, 582 "PRIMARY KEY": TokenType.PRIMARY_KEY, 583 "PROCEDURE": TokenType.PROCEDURE, 584 "QUALIFY": TokenType.QUALIFY, 585 "RANGE": TokenType.RANGE, 586 "RECURSIVE": TokenType.RECURSIVE, 587 "REGEXP": TokenType.RLIKE, 588 "REPLACE": TokenType.REPLACE, 589 "RESPECT NULLS": TokenType.RESPECT_NULLS, 590 "REFERENCES": TokenType.REFERENCES, 591 "RIGHT": TokenType.RIGHT, 592 "RLIKE": TokenType.RLIKE, 593 "ROLLBACK": TokenType.ROLLBACK, 594 "ROLLUP": TokenType.ROLLUP, 595 "ROW": TokenType.ROW, 596 "ROWS": TokenType.ROWS, 597 "SCHEMA": TokenType.SCHEMA, 598 "SEED": TokenType.SEED, 599 "SELECT": TokenType.SELECT, 600 "SEMI": TokenType.SEMI, 601 "SET": TokenType.SET, 602 "SHOW": TokenType.SHOW, 603 "SIMILAR TO": TokenType.SIMILAR_TO, 604 "SOME": TokenType.SOME, 605 "SORTKEY": TokenType.SORTKEY, 606 "SORT BY": TokenType.SORT_BY, 607 "TABLE": TokenType.TABLE, 608 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 609 "TEMP": TokenType.TEMPORARY, 610 "TEMPORARY": TokenType.TEMPORARY, 611 "THEN": TokenType.THEN, 612 "TRUE": TokenType.TRUE, 613 "TRAILING": TokenType.TRAILING, 614 "UNBOUNDED": TokenType.UNBOUNDED, 615 "UNION": TokenType.UNION, 
616 "UNLOGGED": TokenType.UNLOGGED, 617 "UNNEST": TokenType.UNNEST, 618 "UNPIVOT": TokenType.UNPIVOT, 619 "UPDATE": TokenType.UPDATE, 620 "USE": TokenType.USE, 621 "USING": TokenType.USING, 622 "VALUES": TokenType.VALUES, 623 "VIEW": TokenType.VIEW, 624 "VOLATILE": TokenType.VOLATILE, 625 "WHEN": TokenType.WHEN, 626 "WHERE": TokenType.WHERE, 627 "WINDOW": TokenType.WINDOW, 628 "WITH": TokenType.WITH, 629 "WITH TIME ZONE": TokenType.WITH_TIME_ZONE, 630 "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE, 631 "WITHIN GROUP": TokenType.WITHIN_GROUP, 632 "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE, 633 "APPLY": TokenType.APPLY, 634 "ARRAY": TokenType.ARRAY, 635 "BOOL": TokenType.BOOLEAN, 636 "BOOLEAN": TokenType.BOOLEAN, 637 "BYTE": TokenType.TINYINT, 638 "TINYINT": TokenType.TINYINT, 639 "SHORT": TokenType.SMALLINT, 640 "SMALLINT": TokenType.SMALLINT, 641 "INT2": TokenType.SMALLINT, 642 "INTEGER": TokenType.INT, 643 "INT": TokenType.INT, 644 "INT4": TokenType.INT, 645 "LONG": TokenType.BIGINT, 646 "BIGINT": TokenType.BIGINT, 647 "INT8": TokenType.BIGINT, 648 "DECIMAL": TokenType.DECIMAL, 649 "MAP": TokenType.MAP, 650 "NULLABLE": TokenType.NULLABLE, 651 "NUMBER": TokenType.DECIMAL, 652 "NUMERIC": TokenType.DECIMAL, 653 "FIXED": TokenType.DECIMAL, 654 "REAL": TokenType.FLOAT, 655 "FLOAT": TokenType.FLOAT, 656 "FLOAT4": TokenType.FLOAT, 657 "FLOAT8": TokenType.DOUBLE, 658 "DOUBLE": TokenType.DOUBLE, 659 "DOUBLE PRECISION": TokenType.DOUBLE, 660 "JSON": TokenType.JSON, 661 "CHAR": TokenType.CHAR, 662 "NCHAR": TokenType.NCHAR, 663 "VARCHAR": TokenType.VARCHAR, 664 "VARCHAR2": TokenType.VARCHAR, 665 "NVARCHAR": TokenType.NVARCHAR, 666 "NVARCHAR2": TokenType.NVARCHAR, 667 "STR": TokenType.TEXT, 668 "STRING": TokenType.TEXT, 669 "TEXT": TokenType.TEXT, 670 "CLOB": TokenType.TEXT, 671 "LONGVARCHAR": TokenType.TEXT, 672 "BINARY": TokenType.BINARY, 673 "BLOB": TokenType.VARBINARY, 674 "BYTEA": TokenType.VARBINARY, 675 "VARBINARY": TokenType.VARBINARY, 676 "TIME": TokenType.TIME, 677 "TIMESTAMP": TokenType.TIMESTAMP, 678 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, 679 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, 680 "DATE": TokenType.DATE, 681 "DATETIME": TokenType.DATETIME, 682 "UNIQUE": TokenType.UNIQUE, 683 "STRUCT": TokenType.STRUCT, 684 "VARIANT": TokenType.VARIANT, 685 "ALTER": TokenType.ALTER, 686 "ALTER AGGREGATE": TokenType.COMMAND, 687 "ALTER DEFAULT": TokenType.COMMAND, 688 "ALTER DOMAIN": TokenType.COMMAND, 689 "ALTER ROLE": TokenType.COMMAND, 690 "ALTER RULE": TokenType.COMMAND, 691 "ALTER SEQUENCE": TokenType.COMMAND, 692 "ALTER TYPE": TokenType.COMMAND, 693 "ALTER USER": TokenType.COMMAND, 694 "ALTER VIEW": TokenType.COMMAND, 695 "ANALYZE": TokenType.COMMAND, 696 "CALL": TokenType.COMMAND, 697 "COPY": TokenType.COMMAND, 698 "EXPLAIN": TokenType.COMMAND, 699 "OPTIMIZE": TokenType.COMMAND, 700 "PREPARE": TokenType.COMMAND, 701 "TRUNCATE": TokenType.COMMAND, 702 "VACUUM": TokenType.COMMAND, 703 } 704 705 WHITE_SPACE = { 706 " ": TokenType.SPACE, 707 "\t": TokenType.SPACE, 708 "\n": TokenType.BREAK, 709 "\r": TokenType.BREAK, 710 "\r\n": TokenType.BREAK, 711 } 712 713 COMMANDS = { 714 TokenType.COMMAND, 715 TokenType.EXECUTE, 716 TokenType.FETCH, 717 TokenType.SET, 718 TokenType.SHOW, 719 } 720 721 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} 722 723 # handle numeric literals like in hive (3L = BIGINT) 724 NUMERIC_LITERALS: t.Dict[str, str] = {} 725 ENCODE: t.Optional[str] = None 726 727 COMMENTS = ["--", ("/*", "*/")] 728 KEYWORD_TRIE = None # autofilled 729 730 
    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_prev_token_comments",
        "_prev_token_type",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = None
        self._end = None
        self._peek = None
        self._prev_token_line = -1
        self._prev_token_comments: t.List[str] = []
        self._prev_token_type = None

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        self._scan()
        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if not self._char:
                break

            white_space = self.WHITE_SPACE.get(self._char)  # type: ignore
            identifier_end = self._IDENTIFIERS.get(self._char)  # type: ignore

            if white_space:
                if white_space == TokenType.BREAK:
                    self._col = 1
                    self._line += 1
            elif self._char.isdigit():  # type: ignore
                self._scan_number()
            elif identifier_end:
                self._scan_identifier(identifier_end)
            else:
                self._scan_keywords()

            if until and until():
                break

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char  # type: ignore
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _advance(self, i: int = 1) -> None:
        self._col += i
        self._current += i
        self._end = self._current >= self.size  # type: ignore
        self._char = self.sql[self._current - 1]  # type: ignore
        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self._prev_token_comments = self._comments
        self._prev_token_type = token_type  # type: ignore
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if token_type in self.COMMANDS and (
            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars: t.Optional[str] = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())  # type: ignore

            if result == 0:
                break
            if result == 2:
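                # in_trie reports 0 (no keyword starts with these characters),
                # 1 (some keyword starts with them) or 2 (they spell a complete
                # keyword); on 2 we record the longest match so far and keep
                # scanning, so "WITH TIME ZONE" wins over the shorter "WITH".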
                word = chars
            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                chars = None

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        self._add(self.KEYWORDS[word.upper()])

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:  # type: ignore
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]  # type: ignore

        if comment_end:
            comment_end_size = len(comment_end)

            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance()

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) != TokenType.BREAK:  # type: ignore
                self._advance()
            self._comments.append(self._text[comment_start_size:])  # type: ignore

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()  # type: ignore
            if peek == "B":
                return self._scan_bits()
            elif peek == "X":
                return self._scan_hex()

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():  # type: ignore
                self._advance()
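            # The decimal/scientific flags let a literal like 1.5e-10 be consumed
            # as a single NUMBER token: one dot, one E, and one sign after the E.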
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:  # type: ignore
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():  # type: ignore
                number_text = self._text
                literal = []

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
                    literal.append(self._peek.upper())  # type: ignore
                    self._advance()

                literal = "".join(literal)  # type: ignore
                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)  # type: ignore
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)  # type: ignore
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\', etc.
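    # Hex and bit payloads are normalized to their decimal value, so (assuming a
    # dialect whose _HEX_STRINGS maps "x'" to a closing "'") x'0F' would be stored
    # as the token text "15"; byte strings are kept verbatim.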
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:  # type: ignore
            delimiters = self._HEX_STRINGS  # type: ignore
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:  # type: ignore
            delimiters = self._BIT_STRINGS  # type: ignore
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:  # type: ignore
            delimiters = self._BYTE_STRINGS  # type: ignore
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters.get(string_start)
        text = self._extract_string(string_end)

        if base is None:
            self._add(token_type, text)
        else:
            try:
                self._add(token_type, f"{int(text, base)}")
            except ValueError:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        text = ""
        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES

        while True:
            if self._end:
                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")

            self._advance()
            if self._char == identifier_end:
                if identifier_end_is_escape and self._peek == identifier_end:
                    text += identifier_end  # type: ignore
                    self._advance()
                    continue

                break

            text += self._char  # type: ignore

        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break
        self._add(
            TokenType.VAR
            if self._prev_token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str) -> str:
        text = ""
        delim_size = len(delimiter)

        while True:
            if self._char in self._STRING_ESCAPES and (
                self._peek == delimiter or self._peek in self._STRING_ESCAPES
            ):
                if self._peek == delimiter:
                    text += self._peek  # type: ignore
                else:
                    text += self._char + self._peek  # type: ignore

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
                text += self._char  # type: ignore
                self._advance()

        return text
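
A minimal usage sketch (assuming the classes above are importable as sqlglot.tokens; the assertions reflect the default Tokenizer settings):

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT a FROM t -- fetch a")

    # Keywords resolve through KEYWORDS; other words fall back to VAR.
    assert tokens[0].token_type == TokenType.SELECT
    assert tokens[1].token_type == TokenType.VAR and tokens[1].text == "a"

    # The trailing comment shares a line with the previous token, so
    # _scan_comment attaches it there instead of to the next token.
    assert tokens[-1].comments == [" fetch a"]

    # Statements opened by a COMMANDS token swallow the remainder into a
    # single STRING token, per the command branch in _add.
    show = Tokenizer().tokenize("SHOW TABLES LIKE 'x'")
    assert [tok.token_type for tok in show] == [TokenType.SHOW, TokenType.STRING]
    assert show[1].text == "TABLES LIKE 'x'"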