From ccb96d1393ae2c16620ea8e8dc749d9642b94e9b Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 20 Feb 2024 10:38:01 +0100 Subject: Merging upstream version 21.1.2. Signed-off-by: Daniel Baumann --- docs/sqlglot/_version.html | 8 +- docs/sqlglot/dataframe/sql.html | 40 +- docs/sqlglot/dialects/bigquery.html | 3522 +++-- docs/sqlglot/dialects/clickhouse.html | 15 +- docs/sqlglot/dialects/databricks.html | 3 +- docs/sqlglot/dialects/doris.html | 3 +- docs/sqlglot/dialects/drill.html | 3 +- docs/sqlglot/dialects/duckdb.html | 5 +- docs/sqlglot/dialects/hive.html | 2338 +-- docs/sqlglot/dialects/mysql.html | 2542 ++-- docs/sqlglot/dialects/oracle.html | 1096 +- docs/sqlglot/dialects/postgres.html | 3 +- docs/sqlglot/dialects/presto.html | 1863 +-- docs/sqlglot/dialects/redshift.html | 594 +- docs/sqlglot/dialects/snowflake.html | 11 +- docs/sqlglot/dialects/spark.html | 1 + docs/sqlglot/dialects/spark2.html | 1 + docs/sqlglot/dialects/sqlite.html | 5 +- docs/sqlglot/dialects/starrocks.html | 3 +- docs/sqlglot/dialects/tableau.html | 1 + docs/sqlglot/dialects/teradata.html | 1274 +- docs/sqlglot/dialects/trino.html | 2 +- docs/sqlglot/dialects/tsql.html | 19 +- docs/sqlglot/expressions.html | 1000 +- docs/sqlglot/helper.html | 116 +- docs/sqlglot/lineage.html | 432 +- docs/sqlglot/optimizer/annotate_types.html | 8 +- docs/sqlglot/optimizer/merge_subqueries.html | 2 +- docs/sqlglot/optimizer/qualify_columns.html | 124 +- docs/sqlglot/optimizer/simplify.html | 4 +- docs/sqlglot/parser.html | 19153 +++++++++++++------------ docs/sqlglot/schema.html | 18 +- docs/sqlglot/time.html | 2 +- docs/sqlglot/tokens.html | 4 +- 34 files changed, 17219 insertions(+), 16996 deletions(-) (limited to 'docs/sqlglot') diff --git a/docs/sqlglot/_version.html b/docs/sqlglot/_version.html index a8f695d..87a1d4a 100644 --- a/docs/sqlglot/_version.html +++ b/docs/sqlglot/_version.html @@ -76,8 +76,8 @@ 12__version_tuple__: VERSION_TUPLE 13version_tuple: VERSION_TUPLE 14 -15__version__ = version = '21.1.0' -16__version_tuple__ = version_tuple = (21, 1, 0) +15__version__ = version = '21.1.1' +16__version_tuple__ = version_tuple = (21, 1, 1) @@ -97,7 +97,7 @@
version: str = -'21.1.0' +'21.1.1'
@@ -109,7 +109,7 @@
version_tuple: object = -(21, 1, 0) +(21, 1, 1)
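For reference, the version metadata documented above is exposed at runtime by the package itself; a minimal sketch in Python (assuming the sqlglot package as laid out here):

import sqlglot
from sqlglot import _version

# __version__ / version_tuple are generated into sqlglot/_version.py (rendered
# above) and re-exported at the top level of the package.
print(sqlglot.__version__)      # e.g. '21.1.1' per the page above
print(_version.version_tuple)   # e.g. (21, 1, 1)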
diff --git a/docs/sqlglot/dataframe/sql.html b/docs/sqlglot/dataframe/sql.html index 1c6b1f4..e6cfcc5 100644 --- a/docs/sqlglot/dataframe/sql.html +++ b/docs/sqlglot/dataframe/sql.html @@ -784,7 +784,7 @@
def - createDataFrame( self, data: Sequence[Union[Dict[str, <MagicMock id='139981980193248'>], List[<MagicMock id='139981980193248'>], Tuple]], schema: Optional[<MagicMock id='139981978717392'>] = None, samplingRatio: Optional[float] = None, verifySchema: bool = False) -> DataFrame: + createDataFrame( self, data: Sequence[Union[Dict[str, <MagicMock id='139663034964704'>], List[<MagicMock id='139663034964704'>], Tuple]], schema: Optional[<MagicMock id='139663034977728'>] = None, samplingRatio: Optional[float] = None, verifySchema: bool = False) -> DataFrame: @@ -1840,7 +1840,7 @@
- DataFrame( spark: <MagicMock id='139981983176912'>, expression: sqlglot.expressions.Select, branch_id: Optional[str] = None, sequence_id: Optional[str] = None, last_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>, pending_hints: Optional[List[sqlglot.expressions.Expression]] = None, output_expression_container: Optional[<MagicMock id='139981983444528'>] = None, **kwargs) + DataFrame( spark: <MagicMock id='139663038364112'>, expression: sqlglot.expressions.Select, branch_id: Optional[str] = None, sequence_id: Optional[str] = None, last_op: sqlglot.dataframe.sql.operations.Operation = <Operation.INIT: -1>, pending_hints: Optional[List[sqlglot.expressions.Expression]] = None, output_expression_container: Optional[<MagicMock id='139663039727648'>] = None, **kwargs) @@ -2088,7 +2088,7 @@
def - sql( self, dialect: <MagicMock id='139981979212416'> = None, optimize: bool = True, **kwargs) -> List[str]: + sql( self, dialect: <MagicMock id='139663035688096'> = None, optimize: bool = True, **kwargs) -> List[str]: @@ -2837,7 +2837,7 @@ is unlikely to come up.
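The signatures above belong to sqlglot's PySpark-style DataFrame API; a minimal, hedged usage sketch (module paths taken from this page; the sample data and column names are hypothetical):

from sqlglot.dataframe.sql import SparkSession, functions as F

spark = SparkSession()
# createDataFrame accepts a sequence of dicts/lists/tuples, per the signature above.
df = spark.createDataFrame([{"id": 1, "name": "a"}])
# DataFrame.sql() returns the generated SQL as a list of strings for the chosen dialect.
print(df.select(F.col("id")).sql(dialect="bigquery")[0])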

@operation(Operation.FROM)
def - fillna( self, value: <MagicMock id='139981977951568'>, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> DataFrame: + fillna( self, value: <MagicMock id='139663035617568'>, subset: Union[str, Tuple[str, ...], List[str], NoneType] = None) -> DataFrame: @@ -2906,7 +2906,7 @@ and check if it matches the type of the value provided. If not then make it null
@operation(Operation.FROM)
def - replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[Collection[<MagicMock id='139981977374288'>], <MagicMock id='139981977374288'>, NoneType] = None) -> DataFrame: + replace( self, to_replace: Union[bool, int, float, str, List, Dict], value: Union[bool, int, float, str, List, NoneType] = None, subset: Union[Collection[<MagicMock id='139663033686128'>], <MagicMock id='139663033686128'>, NoneType] = None) -> DataFrame: @@ -3111,7 +3111,7 @@ and check if it matches the type of the value provided. If not then make it null
@operation(Operation.NO_OP)
def - repartition( self, numPartitions: Union[int, <MagicMock id='139981977374288'>], *cols: <MagicMock id='139981977374288'>) -> DataFrame: + repartition( self, numPartitions: Union[int, <MagicMock id='139663033686128'>], *cols: <MagicMock id='139663033686128'>) -> DataFrame: @@ -3831,7 +3831,7 @@ and check if it matches the type of the value provided. If not then make it null
- Column( expression: Union[<MagicMock id='139981981343056'>, sqlglot.expressions.Expression, NoneType]) + Column( expression: Union[<MagicMock id='139663036908736'>, sqlglot.expressions.Expression, NoneType]) @@ -3875,7 +3875,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - ensure_col( cls, value: Union[<MagicMock id='139981981343056'>, sqlglot.expressions.Expression, NoneType]) -> Column: + ensure_col( cls, value: Union[<MagicMock id='139663036908736'>, sqlglot.expressions.Expression, NoneType]) -> Column: @@ -3896,7 +3896,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - ensure_cols( cls, args: List[Union[<MagicMock id='139981981343056'>, sqlglot.expressions.Expression]]) -> List[Column]: + ensure_cols( cls, args: List[Union[<MagicMock id='139663036908736'>, sqlglot.expressions.Expression]]) -> List[Column]: @@ -3917,7 +3917,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - invoke_anonymous_function( cls, column: Optional[<MagicMock id='139981981343056'>], func_name: str, *args: Optional[<MagicMock id='139981981343056'>]) -> Column: + invoke_anonymous_function( cls, column: Optional[<MagicMock id='139663036908736'>], func_name: str, *args: Optional[<MagicMock id='139663036908736'>]) -> Column: @@ -3944,7 +3944,7 @@ and check if it matches the type of the value provided. If not then make it null
@classmethod
def - invoke_expression_over_column( cls, column: Optional[<MagicMock id='139981981343056'>], callable_expression: Callable, **kwargs) -> Column: + invoke_expression_over_column( cls, column: Optional[<MagicMock id='139663036908736'>], callable_expression: Callable, **kwargs) -> Column: @@ -3983,7 +3983,7 @@ and check if it matches the type of the value provided. If not then make it null
def - binary_op( self, klass: Callable, other: <MagicMock id='139981981343056'>, **kwargs) -> Column: + binary_op( self, klass: Callable, other: <MagicMock id='139663036908736'>, **kwargs) -> Column: @@ -4004,7 +4004,7 @@ and check if it matches the type of the value provided. If not then make it null
def - inverse_binary_op( self, klass: Callable, other: <MagicMock id='139981981343056'>, **kwargs) -> Column: + inverse_binary_op( self, klass: Callable, other: <MagicMock id='139663036908736'>, **kwargs) -> Column: @@ -4598,7 +4598,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - isin( self, *cols: Union[<MagicMock id='139981981343056'>, Iterable[<MagicMock id='139981981343056'>]]): + isin( self, *cols: Union[<MagicMock id='139663036908736'>, Iterable[<MagicMock id='139663036908736'>]]): @@ -4619,7 +4619,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - between( self, lowerBound: <MagicMock id='139981981343056'>, upperBound: <MagicMock id='139981981343056'>) -> Column: + between( self, lowerBound: <MagicMock id='139663036908736'>, upperBound: <MagicMock id='139663036908736'>) -> Column: @@ -4654,7 +4654,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - over( self, window: <MagicMock id='139981977905056'>) -> Column: + over( self, window: <MagicMock id='139663034172272'>) -> Column: @@ -4899,7 +4899,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

@classmethod
def - partitionBy( cls, *cols: Union[<MagicMock id='139981978118480'>, List[<MagicMock id='139981978118480'>]]) -> WindowSpec: + partitionBy( cls, *cols: Union[<MagicMock id='139663034577728'>, List[<MagicMock id='139663034577728'>]]) -> WindowSpec: @@ -4920,7 +4920,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

@classmethod
def - orderBy( cls, *cols: Union[<MagicMock id='139981978118480'>, List[<MagicMock id='139981978118480'>]]) -> WindowSpec: + orderBy( cls, *cols: Union[<MagicMock id='139663034577728'>, List[<MagicMock id='139663034577728'>]]) -> WindowSpec: @@ -5162,7 +5162,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - partitionBy( self, *cols: Union[<MagicMock id='139981978118480'>, List[<MagicMock id='139981978118480'>]]) -> WindowSpec: + partitionBy( self, *cols: Union[<MagicMock id='139663034577728'>, List[<MagicMock id='139663034577728'>]]) -> WindowSpec: @@ -5189,7 +5189,7 @@ Sqlglot doesn't currently replicate this class so it only accepts a string

def - orderBy( self, *cols: Union[<MagicMock id='139981978118480'>, List[<MagicMock id='139981978118480'>]]) -> WindowSpec: + orderBy( self, *cols: Union[<MagicMock id='139663034577728'>, List[<MagicMock id='139663034577728'>]]) -> WindowSpec: diff --git a/docs/sqlglot/dialects/bigquery.html b/docs/sqlglot/dialects/bigquery.html index b575de9..03fd61c 100644 --- a/docs/sqlglot/dialects/bigquery.html +++ b/docs/sqlglot/dialects/bigquery.html @@ -123,9 +123,6 @@
  • NESTED_TYPE_TOKENS
  • -
  • - ID_VAR_TOKENS -
  • PROPERTY_PARSERS
  • @@ -695,570 +692,566 @@ 331 "TIMESTAMP": TokenType.TIMESTAMPTZ, 332 } 333 KEYWORDS.pop("DIV") -334 -335 class Parser(parser.Parser): -336 PREFIXED_PIVOT_COLUMNS = True -337 -338 LOG_DEFAULTS_TO_LN = True -339 -340 FUNCTIONS = { -341 **parser.Parser.FUNCTIONS, -342 "DATE": _parse_date, -343 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), -344 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), -345 "DATE_TRUNC": lambda args: exp.DateTrunc( -346 unit=exp.Literal.string(str(seq_get(args, 1))), -347 this=seq_get(args, 0), -348 ), -349 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), -350 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), -351 "DIV": binary_from_function(exp.IntDiv), -352 "FORMAT_DATE": lambda args: exp.TimeToStr( -353 this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0) -354 ), -355 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, -356 "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar( -357 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$") -358 ), -359 "MD5": exp.MD5Digest.from_arg_list, -360 "TO_HEX": _parse_to_hex, -361 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( -362 [seq_get(args, 1), seq_get(args, 0)] -363 ), -364 "PARSE_TIMESTAMP": _parse_parse_timestamp, -365 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, -366 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -367 this=seq_get(args, 0), -368 expression=seq_get(args, 1), -369 position=seq_get(args, 2), -370 occurrence=seq_get(args, 3), -371 group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None, -372 ), -373 "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)), -374 "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)), -375 "SPLIT": lambda args: exp.Split( -376 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split -377 this=seq_get(args, 0), -378 expression=seq_get(args, 1) or exp.Literal.string(","), -379 ), -380 "TIME": _parse_time, -381 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), -382 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), -383 "TIMESTAMP": _parse_timestamp, -384 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), -385 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), -386 "TIMESTAMP_MICROS": lambda args: exp.UnixToTime( -387 this=seq_get(args, 0), scale=exp.UnixToTime.MICROS -388 ), -389 "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime( -390 this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS -391 ), -392 "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)), -393 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, -394 } -395 -396 FUNCTION_PARSERS = { -397 **parser.Parser.FUNCTION_PARSERS, -398 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), -399 } -400 FUNCTION_PARSERS.pop("TRIM") -401 -402 NO_PAREN_FUNCTIONS = { -403 **parser.Parser.NO_PAREN_FUNCTIONS, -404 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, -405 } -406 -407 NESTED_TYPE_TOKENS = { -408 *parser.Parser.NESTED_TYPE_TOKENS, -409 TokenType.TABLE, -410 } -411 -412 ID_VAR_TOKENS = { -413 *parser.Parser.ID_VAR_TOKENS, -414 TokenType.VALUES, -415 } -416 -417 PROPERTY_PARSERS = { -418 **parser.Parser.PROPERTY_PARSERS, -419 "NOT DETERMINISTIC": lambda self: self.expression( -420 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") -421 ), -422 "OPTIONS": lambda self: 
self._parse_with_property(), -423 } -424 -425 CONSTRAINT_PARSERS = { -426 **parser.Parser.CONSTRAINT_PARSERS, -427 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), -428 } -429 -430 RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy() -431 RANGE_PARSERS.pop(TokenType.OVERLAPS, None) -432 -433 NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN} -434 -435 STATEMENT_PARSERS = { -436 **parser.Parser.STATEMENT_PARSERS, -437 TokenType.END: lambda self: self._parse_as_command(self._prev), -438 TokenType.FOR: lambda self: self._parse_for_in(), -439 } -440 -441 BRACKET_OFFSETS = { -442 "OFFSET": (0, False), -443 "ORDINAL": (1, False), -444 "SAFE_OFFSET": (0, True), -445 "SAFE_ORDINAL": (1, True), -446 } -447 -448 def _parse_for_in(self) -> exp.ForIn: -449 this = self._parse_range() -450 self._match_text_seq("DO") -451 return self.expression(exp.ForIn, this=this, expression=self._parse_statement()) -452 -453 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: -454 this = super()._parse_table_part(schema=schema) or self._parse_number() -455 -456 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names -457 if isinstance(this, exp.Identifier): -458 table_name = this.name -459 while self._match(TokenType.DASH, advance=False) and self._next: -460 self._advance(2) -461 table_name += f"-{self._prev.text}" +334 KEYWORDS.pop("VALUES") +335 +336 class Parser(parser.Parser): +337 PREFIXED_PIVOT_COLUMNS = True +338 +339 LOG_DEFAULTS_TO_LN = True +340 +341 FUNCTIONS = { +342 **parser.Parser.FUNCTIONS, +343 "DATE": _parse_date, +344 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), +345 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), +346 "DATE_TRUNC": lambda args: exp.DateTrunc( +347 unit=exp.Literal.string(str(seq_get(args, 1))), +348 this=seq_get(args, 0), +349 ), +350 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), +351 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), +352 "DIV": binary_from_function(exp.IntDiv), +353 "FORMAT_DATE": lambda args: exp.TimeToStr( +354 this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0) +355 ), +356 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, +357 "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar( +358 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$") +359 ), +360 "MD5": exp.MD5Digest.from_arg_list, +361 "TO_HEX": _parse_to_hex, +362 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( +363 [seq_get(args, 1), seq_get(args, 0)] +364 ), +365 "PARSE_TIMESTAMP": _parse_parse_timestamp, +366 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, +367 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +368 this=seq_get(args, 0), +369 expression=seq_get(args, 1), +370 position=seq_get(args, 2), +371 occurrence=seq_get(args, 3), +372 group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None, +373 ), +374 "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)), +375 "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)), +376 "SPLIT": lambda args: exp.Split( +377 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split +378 this=seq_get(args, 0), +379 expression=seq_get(args, 1) or exp.Literal.string(","), +380 ), +381 "TIME": _parse_time, +382 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), +383 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), +384 
"TIMESTAMP": _parse_timestamp, +385 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), +386 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), +387 "TIMESTAMP_MICROS": lambda args: exp.UnixToTime( +388 this=seq_get(args, 0), scale=exp.UnixToTime.MICROS +389 ), +390 "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime( +391 this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS +392 ), +393 "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)), +394 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, +395 } +396 +397 FUNCTION_PARSERS = { +398 **parser.Parser.FUNCTION_PARSERS, +399 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), +400 } +401 FUNCTION_PARSERS.pop("TRIM") +402 +403 NO_PAREN_FUNCTIONS = { +404 **parser.Parser.NO_PAREN_FUNCTIONS, +405 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, +406 } +407 +408 NESTED_TYPE_TOKENS = { +409 *parser.Parser.NESTED_TYPE_TOKENS, +410 TokenType.TABLE, +411 } +412 +413 PROPERTY_PARSERS = { +414 **parser.Parser.PROPERTY_PARSERS, +415 "NOT DETERMINISTIC": lambda self: self.expression( +416 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") +417 ), +418 "OPTIONS": lambda self: self._parse_with_property(), +419 } +420 +421 CONSTRAINT_PARSERS = { +422 **parser.Parser.CONSTRAINT_PARSERS, +423 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), +424 } +425 +426 RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy() +427 RANGE_PARSERS.pop(TokenType.OVERLAPS, None) +428 +429 NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN} +430 +431 STATEMENT_PARSERS = { +432 **parser.Parser.STATEMENT_PARSERS, +433 TokenType.END: lambda self: self._parse_as_command(self._prev), +434 TokenType.FOR: lambda self: self._parse_for_in(), +435 } +436 +437 BRACKET_OFFSETS = { +438 "OFFSET": (0, False), +439 "ORDINAL": (1, False), +440 "SAFE_OFFSET": (0, True), +441 "SAFE_ORDINAL": (1, True), +442 } +443 +444 def _parse_for_in(self) -> exp.ForIn: +445 this = self._parse_range() +446 self._match_text_seq("DO") +447 return self.expression(exp.ForIn, this=this, expression=self._parse_statement()) +448 +449 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: +450 this = super()._parse_table_part(schema=schema) or self._parse_number() +451 +452 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names +453 if isinstance(this, exp.Identifier): +454 table_name = this.name +455 while self._match(TokenType.DASH, advance=False) and self._next: +456 self._advance(2) +457 table_name += f"-{self._prev.text}" +458 +459 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) +460 elif isinstance(this, exp.Literal): +461 table_name = this.name 462 -463 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) -464 elif isinstance(this, exp.Literal): -465 table_name = this.name -466 -467 if self._is_connected() and self._parse_var(any_token=True): -468 table_name += self._prev.text +463 if self._is_connected() and self._parse_var(any_token=True): +464 table_name += self._prev.text +465 +466 this = exp.Identifier(this=table_name, quoted=True) +467 +468 return this 469 -470 this = exp.Identifier(this=table_name, quoted=True) -471 -472 return this -473 -474 def _parse_table_parts( -475 self, schema: bool = False, is_db_reference: bool = False -476 ) -> exp.Table: -477 table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference) -478 if isinstance(table.this, exp.Identifier) and "." 
in table.name: -479 catalog, db, this, *rest = ( -480 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) -481 for x in split_num_words(table.name, ".", 3) -482 ) -483 -484 if rest and this: -485 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +470 def _parse_table_parts( +471 self, schema: bool = False, is_db_reference: bool = False +472 ) -> exp.Table: +473 table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference) +474 if isinstance(table.this, exp.Identifier) and "." in table.name: +475 catalog, db, this, *rest = ( +476 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) +477 for x in split_num_words(table.name, ".", 3) +478 ) +479 +480 if rest and this: +481 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +482 +483 table = exp.Table(this=this, db=db, catalog=catalog) +484 +485 return table 486 -487 table = exp.Table(this=this, db=db, catalog=catalog) -488 -489 return table +487 @t.overload +488 def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: +489 ... 490 491 @t.overload -492 def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: +492 def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: 493 ... 494 -495 @t.overload -496 def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: -497 ... +495 def _parse_json_object(self, agg=False): +496 json_object = super()._parse_json_object() +497 array_kv_pair = seq_get(json_object.expressions, 0) 498 -499 def _parse_json_object(self, agg=False): -500 json_object = super()._parse_json_object() -501 array_kv_pair = seq_get(json_object.expressions, 0) -502 -503 # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation -504 # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2 -505 if ( -506 array_kv_pair -507 and isinstance(array_kv_pair.this, exp.Array) -508 and isinstance(array_kv_pair.expression, exp.Array) -509 ): -510 keys = array_kv_pair.this.expressions -511 values = array_kv_pair.expression.expressions -512 -513 json_object.set( -514 "expressions", -515 [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)], -516 ) -517 -518 return json_object -519 -520 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -521 bracket = super()._parse_bracket(this) -522 -523 if this is bracket: -524 return bracket +499 # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation +500 # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2 +501 if ( +502 array_kv_pair +503 and isinstance(array_kv_pair.this, exp.Array) +504 and isinstance(array_kv_pair.expression, exp.Array) +505 ): +506 keys = array_kv_pair.this.expressions +507 values = array_kv_pair.expression.expressions +508 +509 json_object.set( +510 "expressions", +511 [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)], +512 ) +513 +514 return json_object +515 +516 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +517 bracket = super()._parse_bracket(this) +518 +519 if this is bracket: +520 return bracket +521 +522 if isinstance(bracket, exp.Bracket): +523 for expression in bracket.expressions: +524 name = expression.name.upper() 525 -526 if isinstance(bracket, exp.Bracket): -527 for expression in bracket.expressions: -528 name = expression.name.upper() -529 -530 if name not in self.BRACKET_OFFSETS: -531 break -532 -533 offset, 
safe = self.BRACKET_OFFSETS[name] -534 bracket.set("offset", offset) -535 bracket.set("safe", safe) -536 expression.replace(expression.expressions[0]) -537 -538 return bracket -539 -540 class Generator(generator.Generator): -541 EXPLICIT_UNION = True -542 INTERVAL_ALLOWS_PLURAL_FORM = False -543 JOIN_HINTS = False -544 QUERY_HINTS = False -545 TABLE_HINTS = False -546 LIMIT_FETCH = "LIMIT" -547 RENAME_TABLE_WITH_DB = False -548 NVL2_SUPPORTED = False -549 UNNEST_WITH_ORDINALITY = False -550 COLLATE_IS_FUNC = True -551 LIMIT_ONLY_LITERALS = True -552 SUPPORTS_TABLE_ALIAS_COLUMNS = False -553 UNPIVOT_ALIASES_ARE_IDENTIFIERS = False -554 JSON_KEY_VALUE_PAIR_SEP = "," -555 NULL_ORDERING_SUPPORTED = False -556 IGNORE_NULLS_IN_FUNC = True -557 JSON_PATH_SINGLE_QUOTE_ESCAPE = True -558 -559 TRANSFORMS = { -560 **generator.Generator.TRANSFORMS, -561 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -562 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), -563 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), -564 exp.ArrayContains: _array_contains_sql, -565 exp.ArraySize: rename_func("ARRAY_LENGTH"), -566 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), -567 exp.CollateProperty: lambda self, e: ( -568 f"DEFAULT COLLATE {self.sql(e, 'this')}" -569 if e.args.get("default") -570 else f"COLLATE {self.sql(e, 'this')}" -571 ), -572 exp.CountIf: rename_func("COUNTIF"), -573 exp.Create: _create_sql, -574 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), -575 exp.DateAdd: date_add_interval_sql("DATE", "ADD"), -576 exp.DateDiff: lambda self, -577 e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", -578 exp.DateFromParts: rename_func("DATE"), -579 exp.DateStrToDate: datestrtodate_sql, -580 exp.DateSub: date_add_interval_sql("DATE", "SUB"), -581 exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"), -582 exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"), -583 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), -584 exp.FromTimeZone: lambda self, e: self.func( -585 "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'" -586 ), -587 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), -588 exp.GroupConcat: rename_func("STRING_AGG"), -589 exp.Hex: rename_func("TO_HEX"), -590 exp.If: if_sql(false_value="NULL"), -591 exp.ILike: no_ilike_sql, -592 exp.IntDiv: rename_func("DIV"), -593 exp.JSONFormat: rename_func("TO_JSON_STRING"), -594 exp.Max: max_or_greatest, -595 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), -596 exp.MD5Digest: rename_func("MD5"), -597 exp.Min: min_or_least, -598 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -599 exp.RegexpExtract: lambda self, e: self.func( -600 "REGEXP_EXTRACT", -601 e.this, -602 e.expression, -603 e.args.get("position"), -604 e.args.get("occurrence"), -605 ), -606 exp.RegexpReplace: regexp_replace_sql, -607 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), -608 exp.ReturnsProperty: _returnsproperty_sql, -609 exp.Select: transforms.preprocess( -610 [ -611 transforms.explode_to_unnest(), -612 _unqualify_unnest, -613 transforms.eliminate_distinct_on, -614 _alias_ordered_group, -615 transforms.eliminate_semi_and_anti_joins, -616 ] -617 ), -618 exp.SHA2: lambda self, e: self.func( -619 "SHA256" if e.text("length") == "256" else "SHA512", e.this -620 ), -621 exp.StabilityProperty: lambda self, e: ( -622 "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC" 
-623 ), -624 exp.StrToDate: lambda self, -625 e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", -626 exp.StrToTime: lambda self, e: self.func( -627 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") -628 ), -629 exp.TimeAdd: date_add_interval_sql("TIME", "ADD"), -630 exp.TimeFromParts: rename_func("TIME"), -631 exp.TimeSub: date_add_interval_sql("TIME", "SUB"), -632 exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"), -633 exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"), -634 exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"), -635 exp.TimeStrToTime: timestrtotime_sql, -636 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), -637 exp.TsOrDsAdd: _ts_or_ds_add_sql, -638 exp.TsOrDsDiff: _ts_or_ds_diff_sql, -639 exp.TsOrDsToTime: rename_func("TIME"), -640 exp.Unhex: rename_func("FROM_HEX"), -641 exp.UnixDate: rename_func("UNIX_DATE"), -642 exp.UnixToTime: _unix_to_time_sql, -643 exp.Values: _derived_table_values_to_unnest, -644 exp.VariancePop: rename_func("VAR_POP"), -645 } -646 -647 SUPPORTED_JSON_PATH_PARTS = { -648 exp.JSONPathKey, -649 exp.JSONPathRoot, -650 exp.JSONPathSubscript, -651 } -652 -653 TYPE_MAPPING = { -654 **generator.Generator.TYPE_MAPPING, -655 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", -656 exp.DataType.Type.BIGINT: "INT64", -657 exp.DataType.Type.BINARY: "BYTES", -658 exp.DataType.Type.BOOLEAN: "BOOL", -659 exp.DataType.Type.CHAR: "STRING", -660 exp.DataType.Type.DECIMAL: "NUMERIC", -661 exp.DataType.Type.DOUBLE: "FLOAT64", -662 exp.DataType.Type.FLOAT: "FLOAT64", -663 exp.DataType.Type.INT: "INT64", -664 exp.DataType.Type.NCHAR: "STRING", -665 exp.DataType.Type.NVARCHAR: "STRING", -666 exp.DataType.Type.SMALLINT: "INT64", -667 exp.DataType.Type.TEXT: "STRING", -668 exp.DataType.Type.TIMESTAMP: "DATETIME", -669 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -670 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -671 exp.DataType.Type.TINYINT: "INT64", -672 exp.DataType.Type.VARBINARY: "BYTES", -673 exp.DataType.Type.VARCHAR: "STRING", -674 exp.DataType.Type.VARIANT: "ANY TYPE", -675 } -676 -677 PROPERTIES_LOCATION = { -678 **generator.Generator.PROPERTIES_LOCATION, -679 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -680 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -681 } -682 -683 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords -684 RESERVED_KEYWORDS = { -685 *generator.Generator.RESERVED_KEYWORDS, -686 "all", -687 "and", -688 "any", -689 "array", -690 "as", -691 "asc", -692 "assert_rows_modified", -693 "at", -694 "between", -695 "by", -696 "case", -697 "cast", -698 "collate", -699 "contains", -700 "create", -701 "cross", -702 "cube", -703 "current", -704 "default", -705 "define", -706 "desc", -707 "distinct", -708 "else", -709 "end", -710 "enum", -711 "escape", -712 "except", -713 "exclude", -714 "exists", -715 "extract", -716 "false", -717 "fetch", -718 "following", -719 "for", -720 "from", -721 "full", -722 "group", -723 "grouping", -724 "groups", -725 "hash", -726 "having", -727 "if", -728 "ignore", -729 "in", -730 "inner", -731 "intersect", -732 "interval", -733 "into", -734 "is", -735 "join", -736 "lateral", -737 "left", -738 "like", -739 "limit", -740 "lookup", -741 "merge", -742 "natural", -743 "new", -744 "no", -745 "not", -746 "null", -747 "nulls", -748 "of", -749 "on", -750 "or", -751 "order", -752 "outer", -753 "over", -754 "partition", -755 "preceding", -756 "proto", -757 "qualify", -758 "range", -759 "recursive", -760 
"respect", -761 "right", -762 "rollup", -763 "rows", -764 "select", -765 "set", -766 "some", -767 "struct", -768 "tablesample", -769 "then", -770 "to", -771 "treat", -772 "true", -773 "unbounded", -774 "union", -775 "unnest", -776 "using", -777 "when", -778 "where", -779 "window", -780 "with", -781 "within", -782 } -783 -784 def timetostr_sql(self, expression: exp.TimeToStr) -> str: -785 if isinstance(expression.this, exp.TsOrDsToDate): -786 this: exp.Expression = expression.this -787 else: -788 this = expression -789 -790 return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})" -791 -792 def struct_sql(self, expression: exp.Struct) -> str: -793 args = [] -794 for expr in expression.expressions: -795 if isinstance(expr, self.KEY_VALUE_DEFINITIONS): -796 arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}" -797 else: -798 arg = self.sql(expr) +526 if name not in self.BRACKET_OFFSETS: +527 break +528 +529 offset, safe = self.BRACKET_OFFSETS[name] +530 bracket.set("offset", offset) +531 bracket.set("safe", safe) +532 expression.replace(expression.expressions[0]) +533 +534 return bracket +535 +536 class Generator(generator.Generator): +537 EXPLICIT_UNION = True +538 INTERVAL_ALLOWS_PLURAL_FORM = False +539 JOIN_HINTS = False +540 QUERY_HINTS = False +541 TABLE_HINTS = False +542 LIMIT_FETCH = "LIMIT" +543 RENAME_TABLE_WITH_DB = False +544 NVL2_SUPPORTED = False +545 UNNEST_WITH_ORDINALITY = False +546 COLLATE_IS_FUNC = True +547 LIMIT_ONLY_LITERALS = True +548 SUPPORTS_TABLE_ALIAS_COLUMNS = False +549 UNPIVOT_ALIASES_ARE_IDENTIFIERS = False +550 JSON_KEY_VALUE_PAIR_SEP = "," +551 NULL_ORDERING_SUPPORTED = False +552 IGNORE_NULLS_IN_FUNC = True +553 JSON_PATH_SINGLE_QUOTE_ESCAPE = True +554 +555 TRANSFORMS = { +556 **generator.Generator.TRANSFORMS, +557 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +558 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), +559 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), +560 exp.ArrayContains: _array_contains_sql, +561 exp.ArraySize: rename_func("ARRAY_LENGTH"), +562 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), +563 exp.CollateProperty: lambda self, e: ( +564 f"DEFAULT COLLATE {self.sql(e, 'this')}" +565 if e.args.get("default") +566 else f"COLLATE {self.sql(e, 'this')}" +567 ), +568 exp.CountIf: rename_func("COUNTIF"), +569 exp.Create: _create_sql, +570 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), +571 exp.DateAdd: date_add_interval_sql("DATE", "ADD"), +572 exp.DateDiff: lambda self, +573 e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", +574 exp.DateFromParts: rename_func("DATE"), +575 exp.DateStrToDate: datestrtodate_sql, +576 exp.DateSub: date_add_interval_sql("DATE", "SUB"), +577 exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"), +578 exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"), +579 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), +580 exp.FromTimeZone: lambda self, e: self.func( +581 "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'" +582 ), +583 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), +584 exp.GroupConcat: rename_func("STRING_AGG"), +585 exp.Hex: rename_func("TO_HEX"), +586 exp.If: if_sql(false_value="NULL"), +587 exp.ILike: no_ilike_sql, +588 exp.IntDiv: rename_func("DIV"), +589 exp.JSONFormat: rename_func("TO_JSON_STRING"), +590 exp.Max: max_or_greatest, +591 exp.MD5: lambda self, e: self.func("TO_HEX", 
self.func("MD5", e.this)), +592 exp.MD5Digest: rename_func("MD5"), +593 exp.Min: min_or_least, +594 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +595 exp.RegexpExtract: lambda self, e: self.func( +596 "REGEXP_EXTRACT", +597 e.this, +598 e.expression, +599 e.args.get("position"), +600 e.args.get("occurrence"), +601 ), +602 exp.RegexpReplace: regexp_replace_sql, +603 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), +604 exp.ReturnsProperty: _returnsproperty_sql, +605 exp.Select: transforms.preprocess( +606 [ +607 transforms.explode_to_unnest(), +608 _unqualify_unnest, +609 transforms.eliminate_distinct_on, +610 _alias_ordered_group, +611 transforms.eliminate_semi_and_anti_joins, +612 ] +613 ), +614 exp.SHA2: lambda self, e: self.func( +615 "SHA256" if e.text("length") == "256" else "SHA512", e.this +616 ), +617 exp.StabilityProperty: lambda self, e: ( +618 "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC" +619 ), +620 exp.StrToDate: lambda self, +621 e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", +622 exp.StrToTime: lambda self, e: self.func( +623 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") +624 ), +625 exp.TimeAdd: date_add_interval_sql("TIME", "ADD"), +626 exp.TimeFromParts: rename_func("TIME"), +627 exp.TimeSub: date_add_interval_sql("TIME", "SUB"), +628 exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"), +629 exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"), +630 exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"), +631 exp.TimeStrToTime: timestrtotime_sql, +632 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), +633 exp.TsOrDsAdd: _ts_or_ds_add_sql, +634 exp.TsOrDsDiff: _ts_or_ds_diff_sql, +635 exp.TsOrDsToTime: rename_func("TIME"), +636 exp.Unhex: rename_func("FROM_HEX"), +637 exp.UnixDate: rename_func("UNIX_DATE"), +638 exp.UnixToTime: _unix_to_time_sql, +639 exp.Values: _derived_table_values_to_unnest, +640 exp.VariancePop: rename_func("VAR_POP"), +641 } +642 +643 SUPPORTED_JSON_PATH_PARTS = { +644 exp.JSONPathKey, +645 exp.JSONPathRoot, +646 exp.JSONPathSubscript, +647 } +648 +649 TYPE_MAPPING = { +650 **generator.Generator.TYPE_MAPPING, +651 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", +652 exp.DataType.Type.BIGINT: "INT64", +653 exp.DataType.Type.BINARY: "BYTES", +654 exp.DataType.Type.BOOLEAN: "BOOL", +655 exp.DataType.Type.CHAR: "STRING", +656 exp.DataType.Type.DECIMAL: "NUMERIC", +657 exp.DataType.Type.DOUBLE: "FLOAT64", +658 exp.DataType.Type.FLOAT: "FLOAT64", +659 exp.DataType.Type.INT: "INT64", +660 exp.DataType.Type.NCHAR: "STRING", +661 exp.DataType.Type.NVARCHAR: "STRING", +662 exp.DataType.Type.SMALLINT: "INT64", +663 exp.DataType.Type.TEXT: "STRING", +664 exp.DataType.Type.TIMESTAMP: "DATETIME", +665 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +666 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +667 exp.DataType.Type.TINYINT: "INT64", +668 exp.DataType.Type.VARBINARY: "BYTES", +669 exp.DataType.Type.VARCHAR: "STRING", +670 exp.DataType.Type.VARIANT: "ANY TYPE", +671 } +672 +673 PROPERTIES_LOCATION = { +674 **generator.Generator.PROPERTIES_LOCATION, +675 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +676 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +677 } +678 +679 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords +680 RESERVED_KEYWORDS = { +681 *generator.Generator.RESERVED_KEYWORDS, +682 "all", +683 "and", +684 "any", +685 "array", +686 "as", +687 "asc", +688 
"assert_rows_modified", +689 "at", +690 "between", +691 "by", +692 "case", +693 "cast", +694 "collate", +695 "contains", +696 "create", +697 "cross", +698 "cube", +699 "current", +700 "default", +701 "define", +702 "desc", +703 "distinct", +704 "else", +705 "end", +706 "enum", +707 "escape", +708 "except", +709 "exclude", +710 "exists", +711 "extract", +712 "false", +713 "fetch", +714 "following", +715 "for", +716 "from", +717 "full", +718 "group", +719 "grouping", +720 "groups", +721 "hash", +722 "having", +723 "if", +724 "ignore", +725 "in", +726 "inner", +727 "intersect", +728 "interval", +729 "into", +730 "is", +731 "join", +732 "lateral", +733 "left", +734 "like", +735 "limit", +736 "lookup", +737 "merge", +738 "natural", +739 "new", +740 "no", +741 "not", +742 "null", +743 "nulls", +744 "of", +745 "on", +746 "or", +747 "order", +748 "outer", +749 "over", +750 "partition", +751 "preceding", +752 "proto", +753 "qualify", +754 "range", +755 "recursive", +756 "respect", +757 "right", +758 "rollup", +759 "rows", +760 "select", +761 "set", +762 "some", +763 "struct", +764 "tablesample", +765 "then", +766 "to", +767 "treat", +768 "true", +769 "unbounded", +770 "union", +771 "unnest", +772 "using", +773 "when", +774 "where", +775 "window", +776 "with", +777 "within", +778 } +779 +780 def timetostr_sql(self, expression: exp.TimeToStr) -> str: +781 if isinstance(expression.this, exp.TsOrDsToDate): +782 this: exp.Expression = expression.this +783 else: +784 this = expression +785 +786 return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})" +787 +788 def struct_sql(self, expression: exp.Struct) -> str: +789 args = [] +790 for expr in expression.expressions: +791 if isinstance(expr, self.KEY_VALUE_DEFINITIONS): +792 arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}" +793 else: +794 arg = self.sql(expr) +795 +796 args.append(arg) +797 +798 return self.func("STRUCT", *args) 799 -800 args.append(arg) -801 -802 return self.func("STRUCT", *args) -803 -804 def eq_sql(self, expression: exp.EQ) -> str: -805 # Operands of = cannot be NULL in BigQuery -806 if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null): -807 if not isinstance(expression.parent, exp.Update): -808 return "NULL" -809 -810 return self.binary(expression, "=") -811 -812 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: -813 parent = expression.parent -814 -815 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). -816 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
-817 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): -818 return self.func( -819 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) -820 ) -821 -822 return super().attimezone_sql(expression) -823 -824 def trycast_sql(self, expression: exp.TryCast) -> str: -825 return self.cast_sql(expression, safe_prefix="SAFE_") -826 -827 def cte_sql(self, expression: exp.CTE) -> str: -828 if expression.alias_column_names: -829 self.unsupported("Column names in CTE definition are not supported.") -830 return super().cte_sql(expression) -831 -832 def array_sql(self, expression: exp.Array) -> str: -833 first_arg = seq_get(expression.expressions, 0) -834 if isinstance(first_arg, exp.Subqueryable): -835 return f"ARRAY{self.wrap(self.sql(first_arg))}" -836 -837 return inline_array_sql(self, expression) +800 def eq_sql(self, expression: exp.EQ) -> str: +801 # Operands of = cannot be NULL in BigQuery +802 if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null): +803 if not isinstance(expression.parent, exp.Update): +804 return "NULL" +805 +806 return self.binary(expression, "=") +807 +808 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: +809 parent = expression.parent +810 +811 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). +812 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. +813 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): +814 return self.func( +815 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) +816 ) +817 +818 return super().attimezone_sql(expression) +819 +820 def trycast_sql(self, expression: exp.TryCast) -> str: +821 return self.cast_sql(expression, safe_prefix="SAFE_") +822 +823 def cte_sql(self, expression: exp.CTE) -> str: +824 if expression.alias_column_names: +825 self.unsupported("Column names in CTE definition are not supported.") +826 return super().cte_sql(expression) +827 +828 def array_sql(self, expression: exp.Array) -> str: +829 first_arg = seq_get(expression.expressions, 0) +830 if isinstance(first_arg, exp.Subqueryable): +831 return f"ARRAY{self.wrap(self.sql(first_arg))}" +832 +833 return inline_array_sql(self, expression) +834 +835 def bracket_sql(self, expression: exp.Bracket) -> str: +836 this = self.sql(expression, "this") +837 expressions = expression.expressions 838 -839 def bracket_sql(self, expression: exp.Bracket) -> str: -840 this = self.sql(expression, "this") -841 expressions = expression.expressions -842 -843 if len(expressions) == 1: -844 arg = expressions[0] -845 if arg.type is None: -846 from sqlglot.optimizer.annotate_types import annotate_types -847 -848 arg = annotate_types(arg) +839 if len(expressions) == 1: +840 arg = expressions[0] +841 if arg.type is None: +842 from sqlglot.optimizer.annotate_types import annotate_types +843 +844 arg = annotate_types(arg) +845 +846 if arg.type and arg.type.this in exp.DataType.TEXT_TYPES: +847 # BQ doesn't support bracket syntax with string values +848 return f"{this}.{arg.name}" 849 -850 if arg.type and arg.type.this in exp.DataType.TEXT_TYPES: -851 # BQ doesn't support bracket syntax with string values -852 return f"{this}.{arg.name}" -853 -854 expressions_sql = ", ".join(self.sql(e) for e in expressions) -855 offset = expression.args.get("offset") -856 -857 if offset == 0: -858 expressions_sql = f"OFFSET({expressions_sql})" -859 elif offset == 1: -860 expressions_sql = f"ORDINAL({expressions_sql})" -861 
elif offset is not None: -862 self.unsupported(f"Unsupported array offset: {offset}") -863 -864 if expression.args.get("safe"): -865 expressions_sql = f"SAFE_{expressions_sql}" -866 -867 return f"{this}[{expressions_sql}]" -868 -869 def transaction_sql(self, *_) -> str: -870 return "BEGIN TRANSACTION" -871 -872 def commit_sql(self, *_) -> str: -873 return "COMMIT TRANSACTION" -874 -875 def rollback_sql(self, *_) -> str: -876 return "ROLLBACK TRANSACTION" -877 -878 def in_unnest_op(self, expression: exp.Unnest) -> str: -879 return self.sql(expression) -880 -881 def except_op(self, expression: exp.Except) -> str: -882 if not expression.args.get("distinct", False): -883 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") -884 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -885 -886 def intersect_op(self, expression: exp.Intersect) -> str: -887 if not expression.args.get("distinct", False): -888 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") -889 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -890 -891 def with_properties(self, properties: exp.Properties) -> str: -892 return self.properties(properties, prefix=self.seg("OPTIONS")) -893 -894 def version_sql(self, expression: exp.Version) -> str: -895 if expression.name == "TIMESTAMP": -896 expression.set("this", "SYSTEM_TIME") -897 return super().version_sql(expression) +850 expressions_sql = ", ".join(self.sql(e) for e in expressions) +851 offset = expression.args.get("offset") +852 +853 if offset == 0: +854 expressions_sql = f"OFFSET({expressions_sql})" +855 elif offset == 1: +856 expressions_sql = f"ORDINAL({expressions_sql})" +857 elif offset is not None: +858 self.unsupported(f"Unsupported array offset: {offset}") +859 +860 if expression.args.get("safe"): +861 expressions_sql = f"SAFE_{expressions_sql}" +862 +863 return f"{this}[{expressions_sql}]" +864 +865 def transaction_sql(self, *_) -> str: +866 return "BEGIN TRANSACTION" +867 +868 def commit_sql(self, *_) -> str: +869 return "COMMIT TRANSACTION" +870 +871 def rollback_sql(self, *_) -> str: +872 return "ROLLBACK TRANSACTION" +873 +874 def in_unnest_op(self, expression: exp.Unnest) -> str: +875 return self.sql(expression) +876 +877 def except_op(self, expression: exp.Except) -> str: +878 if not expression.args.get("distinct", False): +879 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") +880 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +881 +882 def intersect_op(self, expression: exp.Intersect) -> str: +883 if not expression.args.get("distinct", False): +884 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") +885 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +886 +887 def with_properties(self, properties: exp.Properties) -> str: +888 return self.properties(properties, prefix=self.seg("OPTIONS")) +889 +890 def version_sql(self, expression: exp.Version) -> str: +891 if expression.name == "TIMESTAMP": +892 expression.set("this", "SYSTEM_TIME") +893 return super().version_sql(expression)
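The BigQuery Parser/Generator rules above are driven through sqlglot's public API; an illustrative sketch (exact output strings depend on the installed version and are not asserted here):

import sqlglot

# Generator.TRANSFORMS above maps e.g. exp.MD5 to TO_HEX(MD5(...)) and
# exp.RegexpLike to REGEXP_CONTAINS when emitting BigQuery SQL.
print(sqlglot.transpile("SELECT MD5(col) FROM t", write="bigquery")[0])

# Parser.BRACKET_OFFSETS together with Generator.bracket_sql handle the
# OFFSET / ORDINAL / SAFE_OFFSET array subscript forms shown above.
print(sqlglot.parse_one("SELECT arr[SAFE_OFFSET(1)] FROM t", read="bigquery").sql("bigquery"))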
    @@ -1383,570 +1376,566 @@ 332 "TIMESTAMP": TokenType.TIMESTAMPTZ, 333 } 334 KEYWORDS.pop("DIV") -335 -336 class Parser(parser.Parser): -337 PREFIXED_PIVOT_COLUMNS = True -338 -339 LOG_DEFAULTS_TO_LN = True -340 -341 FUNCTIONS = { -342 **parser.Parser.FUNCTIONS, -343 "DATE": _parse_date, -344 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), -345 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), -346 "DATE_TRUNC": lambda args: exp.DateTrunc( -347 unit=exp.Literal.string(str(seq_get(args, 1))), -348 this=seq_get(args, 0), -349 ), -350 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), -351 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), -352 "DIV": binary_from_function(exp.IntDiv), -353 "FORMAT_DATE": lambda args: exp.TimeToStr( -354 this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0) -355 ), -356 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, -357 "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar( -358 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$") -359 ), -360 "MD5": exp.MD5Digest.from_arg_list, -361 "TO_HEX": _parse_to_hex, -362 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( -363 [seq_get(args, 1), seq_get(args, 0)] -364 ), -365 "PARSE_TIMESTAMP": _parse_parse_timestamp, -366 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, -367 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -368 this=seq_get(args, 0), -369 expression=seq_get(args, 1), -370 position=seq_get(args, 2), -371 occurrence=seq_get(args, 3), -372 group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None, -373 ), -374 "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)), -375 "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)), -376 "SPLIT": lambda args: exp.Split( -377 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split -378 this=seq_get(args, 0), -379 expression=seq_get(args, 1) or exp.Literal.string(","), -380 ), -381 "TIME": _parse_time, -382 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), -383 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), -384 "TIMESTAMP": _parse_timestamp, -385 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), -386 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), -387 "TIMESTAMP_MICROS": lambda args: exp.UnixToTime( -388 this=seq_get(args, 0), scale=exp.UnixToTime.MICROS -389 ), -390 "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime( -391 this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS -392 ), -393 "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)), -394 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, -395 } -396 -397 FUNCTION_PARSERS = { -398 **parser.Parser.FUNCTION_PARSERS, -399 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), -400 } -401 FUNCTION_PARSERS.pop("TRIM") -402 -403 NO_PAREN_FUNCTIONS = { -404 **parser.Parser.NO_PAREN_FUNCTIONS, -405 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, -406 } -407 -408 NESTED_TYPE_TOKENS = { -409 *parser.Parser.NESTED_TYPE_TOKENS, -410 TokenType.TABLE, -411 } -412 -413 ID_VAR_TOKENS = { -414 *parser.Parser.ID_VAR_TOKENS, -415 TokenType.VALUES, -416 } -417 -418 PROPERTY_PARSERS = { -419 **parser.Parser.PROPERTY_PARSERS, -420 "NOT DETERMINISTIC": lambda self: self.expression( -421 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") -422 ), -423 "OPTIONS": lambda self: 
self._parse_with_property(), -424 } -425 -426 CONSTRAINT_PARSERS = { -427 **parser.Parser.CONSTRAINT_PARSERS, -428 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), -429 } -430 -431 RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy() -432 RANGE_PARSERS.pop(TokenType.OVERLAPS, None) -433 -434 NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN} -435 -436 STATEMENT_PARSERS = { -437 **parser.Parser.STATEMENT_PARSERS, -438 TokenType.END: lambda self: self._parse_as_command(self._prev), -439 TokenType.FOR: lambda self: self._parse_for_in(), -440 } -441 -442 BRACKET_OFFSETS = { -443 "OFFSET": (0, False), -444 "ORDINAL": (1, False), -445 "SAFE_OFFSET": (0, True), -446 "SAFE_ORDINAL": (1, True), -447 } -448 -449 def _parse_for_in(self) -> exp.ForIn: -450 this = self._parse_range() -451 self._match_text_seq("DO") -452 return self.expression(exp.ForIn, this=this, expression=self._parse_statement()) -453 -454 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: -455 this = super()._parse_table_part(schema=schema) or self._parse_number() -456 -457 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names -458 if isinstance(this, exp.Identifier): -459 table_name = this.name -460 while self._match(TokenType.DASH, advance=False) and self._next: -461 self._advance(2) -462 table_name += f"-{self._prev.text}" +335 KEYWORDS.pop("VALUES") +336 +337 class Parser(parser.Parser): +338 PREFIXED_PIVOT_COLUMNS = True +339 +340 LOG_DEFAULTS_TO_LN = True +341 +342 FUNCTIONS = { +343 **parser.Parser.FUNCTIONS, +344 "DATE": _parse_date, +345 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), +346 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), +347 "DATE_TRUNC": lambda args: exp.DateTrunc( +348 unit=exp.Literal.string(str(seq_get(args, 1))), +349 this=seq_get(args, 0), +350 ), +351 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), +352 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), +353 "DIV": binary_from_function(exp.IntDiv), +354 "FORMAT_DATE": lambda args: exp.TimeToStr( +355 this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0) +356 ), +357 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, +358 "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar( +359 this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$") +360 ), +361 "MD5": exp.MD5Digest.from_arg_list, +362 "TO_HEX": _parse_to_hex, +363 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( +364 [seq_get(args, 1), seq_get(args, 0)] +365 ), +366 "PARSE_TIMESTAMP": _parse_parse_timestamp, +367 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, +368 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +369 this=seq_get(args, 0), +370 expression=seq_get(args, 1), +371 position=seq_get(args, 2), +372 occurrence=seq_get(args, 3), +373 group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None, +374 ), +375 "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)), +376 "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)), +377 "SPLIT": lambda args: exp.Split( +378 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split +379 this=seq_get(args, 0), +380 expression=seq_get(args, 1) or exp.Literal.string(","), +381 ), +382 "TIME": _parse_time, +383 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), +384 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), +385 
"TIMESTAMP": _parse_timestamp, +386 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), +387 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), +388 "TIMESTAMP_MICROS": lambda args: exp.UnixToTime( +389 this=seq_get(args, 0), scale=exp.UnixToTime.MICROS +390 ), +391 "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime( +392 this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS +393 ), +394 "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)), +395 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, +396 } +397 +398 FUNCTION_PARSERS = { +399 **parser.Parser.FUNCTION_PARSERS, +400 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), +401 } +402 FUNCTION_PARSERS.pop("TRIM") +403 +404 NO_PAREN_FUNCTIONS = { +405 **parser.Parser.NO_PAREN_FUNCTIONS, +406 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, +407 } +408 +409 NESTED_TYPE_TOKENS = { +410 *parser.Parser.NESTED_TYPE_TOKENS, +411 TokenType.TABLE, +412 } +413 +414 PROPERTY_PARSERS = { +415 **parser.Parser.PROPERTY_PARSERS, +416 "NOT DETERMINISTIC": lambda self: self.expression( +417 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") +418 ), +419 "OPTIONS": lambda self: self._parse_with_property(), +420 } +421 +422 CONSTRAINT_PARSERS = { +423 **parser.Parser.CONSTRAINT_PARSERS, +424 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), +425 } +426 +427 RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy() +428 RANGE_PARSERS.pop(TokenType.OVERLAPS, None) +429 +430 NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN} +431 +432 STATEMENT_PARSERS = { +433 **parser.Parser.STATEMENT_PARSERS, +434 TokenType.END: lambda self: self._parse_as_command(self._prev), +435 TokenType.FOR: lambda self: self._parse_for_in(), +436 } +437 +438 BRACKET_OFFSETS = { +439 "OFFSET": (0, False), +440 "ORDINAL": (1, False), +441 "SAFE_OFFSET": (0, True), +442 "SAFE_ORDINAL": (1, True), +443 } +444 +445 def _parse_for_in(self) -> exp.ForIn: +446 this = self._parse_range() +447 self._match_text_seq("DO") +448 return self.expression(exp.ForIn, this=this, expression=self._parse_statement()) +449 +450 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: +451 this = super()._parse_table_part(schema=schema) or self._parse_number() +452 +453 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names +454 if isinstance(this, exp.Identifier): +455 table_name = this.name +456 while self._match(TokenType.DASH, advance=False) and self._next: +457 self._advance(2) +458 table_name += f"-{self._prev.text}" +459 +460 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) +461 elif isinstance(this, exp.Literal): +462 table_name = this.name 463 -464 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) -465 elif isinstance(this, exp.Literal): -466 table_name = this.name -467 -468 if self._is_connected() and self._parse_var(any_token=True): -469 table_name += self._prev.text +464 if self._is_connected() and self._parse_var(any_token=True): +465 table_name += self._prev.text +466 +467 this = exp.Identifier(this=table_name, quoted=True) +468 +469 return this 470 -471 this = exp.Identifier(this=table_name, quoted=True) -472 -473 return this -474 -475 def _parse_table_parts( -476 self, schema: bool = False, is_db_reference: bool = False -477 ) -> exp.Table: -478 table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference) -479 if isinstance(table.this, exp.Identifier) and "." 
in table.name: -480 catalog, db, this, *rest = ( -481 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) -482 for x in split_num_words(table.name, ".", 3) -483 ) -484 -485 if rest and this: -486 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +471 def _parse_table_parts( +472 self, schema: bool = False, is_db_reference: bool = False +473 ) -> exp.Table: +474 table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference) +475 if isinstance(table.this, exp.Identifier) and "." in table.name: +476 catalog, db, this, *rest = ( +477 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) +478 for x in split_num_words(table.name, ".", 3) +479 ) +480 +481 if rest and this: +482 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) +483 +484 table = exp.Table(this=this, db=db, catalog=catalog) +485 +486 return table 487 -488 table = exp.Table(this=this, db=db, catalog=catalog) -489 -490 return table +488 @t.overload +489 def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: +490 ... 491 492 @t.overload -493 def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: +493 def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: 494 ... 495 -496 @t.overload -497 def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: -498 ... +496 def _parse_json_object(self, agg=False): +497 json_object = super()._parse_json_object() +498 array_kv_pair = seq_get(json_object.expressions, 0) 499 -500 def _parse_json_object(self, agg=False): -501 json_object = super()._parse_json_object() -502 array_kv_pair = seq_get(json_object.expressions, 0) -503 -504 # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation -505 # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2 -506 if ( -507 array_kv_pair -508 and isinstance(array_kv_pair.this, exp.Array) -509 and isinstance(array_kv_pair.expression, exp.Array) -510 ): -511 keys = array_kv_pair.this.expressions -512 values = array_kv_pair.expression.expressions -513 -514 json_object.set( -515 "expressions", -516 [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)], -517 ) -518 -519 return json_object -520 -521 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: -522 bracket = super()._parse_bracket(this) -523 -524 if this is bracket: -525 return bracket +500 # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation +501 # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2 +502 if ( +503 array_kv_pair +504 and isinstance(array_kv_pair.this, exp.Array) +505 and isinstance(array_kv_pair.expression, exp.Array) +506 ): +507 keys = array_kv_pair.this.expressions +508 values = array_kv_pair.expression.expressions +509 +510 json_object.set( +511 "expressions", +512 [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)], +513 ) +514 +515 return json_object +516 +517 def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: +518 bracket = super()._parse_bracket(this) +519 +520 if this is bracket: +521 return bracket +522 +523 if isinstance(bracket, exp.Bracket): +524 for expression in bracket.expressions: +525 name = expression.name.upper() 526 -527 if isinstance(bracket, exp.Bracket): -528 for expression in bracket.expressions: -529 name = expression.name.upper() -530 -531 if name not in self.BRACKET_OFFSETS: -532 break -533 -534 offset, 
safe = self.BRACKET_OFFSETS[name] -535 bracket.set("offset", offset) -536 bracket.set("safe", safe) -537 expression.replace(expression.expressions[0]) -538 -539 return bracket -540 -541 class Generator(generator.Generator): -542 EXPLICIT_UNION = True -543 INTERVAL_ALLOWS_PLURAL_FORM = False -544 JOIN_HINTS = False -545 QUERY_HINTS = False -546 TABLE_HINTS = False -547 LIMIT_FETCH = "LIMIT" -548 RENAME_TABLE_WITH_DB = False -549 NVL2_SUPPORTED = False -550 UNNEST_WITH_ORDINALITY = False -551 COLLATE_IS_FUNC = True -552 LIMIT_ONLY_LITERALS = True -553 SUPPORTS_TABLE_ALIAS_COLUMNS = False -554 UNPIVOT_ALIASES_ARE_IDENTIFIERS = False -555 JSON_KEY_VALUE_PAIR_SEP = "," -556 NULL_ORDERING_SUPPORTED = False -557 IGNORE_NULLS_IN_FUNC = True -558 JSON_PATH_SINGLE_QUOTE_ESCAPE = True -559 -560 TRANSFORMS = { -561 **generator.Generator.TRANSFORMS, -562 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), -563 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), -564 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), -565 exp.ArrayContains: _array_contains_sql, -566 exp.ArraySize: rename_func("ARRAY_LENGTH"), -567 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), -568 exp.CollateProperty: lambda self, e: ( -569 f"DEFAULT COLLATE {self.sql(e, 'this')}" -570 if e.args.get("default") -571 else f"COLLATE {self.sql(e, 'this')}" -572 ), -573 exp.CountIf: rename_func("COUNTIF"), -574 exp.Create: _create_sql, -575 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), -576 exp.DateAdd: date_add_interval_sql("DATE", "ADD"), -577 exp.DateDiff: lambda self, -578 e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", -579 exp.DateFromParts: rename_func("DATE"), -580 exp.DateStrToDate: datestrtodate_sql, -581 exp.DateSub: date_add_interval_sql("DATE", "SUB"), -582 exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"), -583 exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"), -584 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), -585 exp.FromTimeZone: lambda self, e: self.func( -586 "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'" -587 ), -588 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), -589 exp.GroupConcat: rename_func("STRING_AGG"), -590 exp.Hex: rename_func("TO_HEX"), -591 exp.If: if_sql(false_value="NULL"), -592 exp.ILike: no_ilike_sql, -593 exp.IntDiv: rename_func("DIV"), -594 exp.JSONFormat: rename_func("TO_JSON_STRING"), -595 exp.Max: max_or_greatest, -596 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), -597 exp.MD5Digest: rename_func("MD5"), -598 exp.Min: min_or_least, -599 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", -600 exp.RegexpExtract: lambda self, e: self.func( -601 "REGEXP_EXTRACT", -602 e.this, -603 e.expression, -604 e.args.get("position"), -605 e.args.get("occurrence"), -606 ), -607 exp.RegexpReplace: regexp_replace_sql, -608 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), -609 exp.ReturnsProperty: _returnsproperty_sql, -610 exp.Select: transforms.preprocess( -611 [ -612 transforms.explode_to_unnest(), -613 _unqualify_unnest, -614 transforms.eliminate_distinct_on, -615 _alias_ordered_group, -616 transforms.eliminate_semi_and_anti_joins, -617 ] -618 ), -619 exp.SHA2: lambda self, e: self.func( -620 "SHA256" if e.text("length") == "256" else "SHA512", e.this -621 ), -622 exp.StabilityProperty: lambda self, e: ( -623 "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC" 
-624 ), -625 exp.StrToDate: lambda self, -626 e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", -627 exp.StrToTime: lambda self, e: self.func( -628 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") -629 ), -630 exp.TimeAdd: date_add_interval_sql("TIME", "ADD"), -631 exp.TimeFromParts: rename_func("TIME"), -632 exp.TimeSub: date_add_interval_sql("TIME", "SUB"), -633 exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"), -634 exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"), -635 exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"), -636 exp.TimeStrToTime: timestrtotime_sql, -637 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), -638 exp.TsOrDsAdd: _ts_or_ds_add_sql, -639 exp.TsOrDsDiff: _ts_or_ds_diff_sql, -640 exp.TsOrDsToTime: rename_func("TIME"), -641 exp.Unhex: rename_func("FROM_HEX"), -642 exp.UnixDate: rename_func("UNIX_DATE"), -643 exp.UnixToTime: _unix_to_time_sql, -644 exp.Values: _derived_table_values_to_unnest, -645 exp.VariancePop: rename_func("VAR_POP"), -646 } -647 -648 SUPPORTED_JSON_PATH_PARTS = { -649 exp.JSONPathKey, -650 exp.JSONPathRoot, -651 exp.JSONPathSubscript, -652 } -653 -654 TYPE_MAPPING = { -655 **generator.Generator.TYPE_MAPPING, -656 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", -657 exp.DataType.Type.BIGINT: "INT64", -658 exp.DataType.Type.BINARY: "BYTES", -659 exp.DataType.Type.BOOLEAN: "BOOL", -660 exp.DataType.Type.CHAR: "STRING", -661 exp.DataType.Type.DECIMAL: "NUMERIC", -662 exp.DataType.Type.DOUBLE: "FLOAT64", -663 exp.DataType.Type.FLOAT: "FLOAT64", -664 exp.DataType.Type.INT: "INT64", -665 exp.DataType.Type.NCHAR: "STRING", -666 exp.DataType.Type.NVARCHAR: "STRING", -667 exp.DataType.Type.SMALLINT: "INT64", -668 exp.DataType.Type.TEXT: "STRING", -669 exp.DataType.Type.TIMESTAMP: "DATETIME", -670 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -671 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -672 exp.DataType.Type.TINYINT: "INT64", -673 exp.DataType.Type.VARBINARY: "BYTES", -674 exp.DataType.Type.VARCHAR: "STRING", -675 exp.DataType.Type.VARIANT: "ANY TYPE", -676 } -677 -678 PROPERTIES_LOCATION = { -679 **generator.Generator.PROPERTIES_LOCATION, -680 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -681 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -682 } -683 -684 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords -685 RESERVED_KEYWORDS = { -686 *generator.Generator.RESERVED_KEYWORDS, -687 "all", -688 "and", -689 "any", -690 "array", -691 "as", -692 "asc", -693 "assert_rows_modified", -694 "at", -695 "between", -696 "by", -697 "case", -698 "cast", -699 "collate", -700 "contains", -701 "create", -702 "cross", -703 "cube", -704 "current", -705 "default", -706 "define", -707 "desc", -708 "distinct", -709 "else", -710 "end", -711 "enum", -712 "escape", -713 "except", -714 "exclude", -715 "exists", -716 "extract", -717 "false", -718 "fetch", -719 "following", -720 "for", -721 "from", -722 "full", -723 "group", -724 "grouping", -725 "groups", -726 "hash", -727 "having", -728 "if", -729 "ignore", -730 "in", -731 "inner", -732 "intersect", -733 "interval", -734 "into", -735 "is", -736 "join", -737 "lateral", -738 "left", -739 "like", -740 "limit", -741 "lookup", -742 "merge", -743 "natural", -744 "new", -745 "no", -746 "not", -747 "null", -748 "nulls", -749 "of", -750 "on", -751 "or", -752 "order", -753 "outer", -754 "over", -755 "partition", -756 "preceding", -757 "proto", -758 "qualify", -759 "range", -760 "recursive", -761 
"respect", -762 "right", -763 "rollup", -764 "rows", -765 "select", -766 "set", -767 "some", -768 "struct", -769 "tablesample", -770 "then", -771 "to", -772 "treat", -773 "true", -774 "unbounded", -775 "union", -776 "unnest", -777 "using", -778 "when", -779 "where", -780 "window", -781 "with", -782 "within", -783 } -784 -785 def timetostr_sql(self, expression: exp.TimeToStr) -> str: -786 if isinstance(expression.this, exp.TsOrDsToDate): -787 this: exp.Expression = expression.this -788 else: -789 this = expression -790 -791 return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})" -792 -793 def struct_sql(self, expression: exp.Struct) -> str: -794 args = [] -795 for expr in expression.expressions: -796 if isinstance(expr, self.KEY_VALUE_DEFINITIONS): -797 arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}" -798 else: -799 arg = self.sql(expr) +527 if name not in self.BRACKET_OFFSETS: +528 break +529 +530 offset, safe = self.BRACKET_OFFSETS[name] +531 bracket.set("offset", offset) +532 bracket.set("safe", safe) +533 expression.replace(expression.expressions[0]) +534 +535 return bracket +536 +537 class Generator(generator.Generator): +538 EXPLICIT_UNION = True +539 INTERVAL_ALLOWS_PLURAL_FORM = False +540 JOIN_HINTS = False +541 QUERY_HINTS = False +542 TABLE_HINTS = False +543 LIMIT_FETCH = "LIMIT" +544 RENAME_TABLE_WITH_DB = False +545 NVL2_SUPPORTED = False +546 UNNEST_WITH_ORDINALITY = False +547 COLLATE_IS_FUNC = True +548 LIMIT_ONLY_LITERALS = True +549 SUPPORTS_TABLE_ALIAS_COLUMNS = False +550 UNPIVOT_ALIASES_ARE_IDENTIFIERS = False +551 JSON_KEY_VALUE_PAIR_SEP = "," +552 NULL_ORDERING_SUPPORTED = False +553 IGNORE_NULLS_IN_FUNC = True +554 JSON_PATH_SINGLE_QUOTE_ESCAPE = True +555 +556 TRANSFORMS = { +557 **generator.Generator.TRANSFORMS, +558 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), +559 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), +560 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), +561 exp.ArrayContains: _array_contains_sql, +562 exp.ArraySize: rename_func("ARRAY_LENGTH"), +563 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), +564 exp.CollateProperty: lambda self, e: ( +565 f"DEFAULT COLLATE {self.sql(e, 'this')}" +566 if e.args.get("default") +567 else f"COLLATE {self.sql(e, 'this')}" +568 ), +569 exp.CountIf: rename_func("COUNTIF"), +570 exp.Create: _create_sql, +571 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), +572 exp.DateAdd: date_add_interval_sql("DATE", "ADD"), +573 exp.DateDiff: lambda self, +574 e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", +575 exp.DateFromParts: rename_func("DATE"), +576 exp.DateStrToDate: datestrtodate_sql, +577 exp.DateSub: date_add_interval_sql("DATE", "SUB"), +578 exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"), +579 exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"), +580 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), +581 exp.FromTimeZone: lambda self, e: self.func( +582 "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'" +583 ), +584 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), +585 exp.GroupConcat: rename_func("STRING_AGG"), +586 exp.Hex: rename_func("TO_HEX"), +587 exp.If: if_sql(false_value="NULL"), +588 exp.ILike: no_ilike_sql, +589 exp.IntDiv: rename_func("DIV"), +590 exp.JSONFormat: rename_func("TO_JSON_STRING"), +591 exp.Max: max_or_greatest, +592 exp.MD5: lambda self, e: self.func("TO_HEX", 
self.func("MD5", e.this)), +593 exp.MD5Digest: rename_func("MD5"), +594 exp.Min: min_or_least, +595 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", +596 exp.RegexpExtract: lambda self, e: self.func( +597 "REGEXP_EXTRACT", +598 e.this, +599 e.expression, +600 e.args.get("position"), +601 e.args.get("occurrence"), +602 ), +603 exp.RegexpReplace: regexp_replace_sql, +604 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), +605 exp.ReturnsProperty: _returnsproperty_sql, +606 exp.Select: transforms.preprocess( +607 [ +608 transforms.explode_to_unnest(), +609 _unqualify_unnest, +610 transforms.eliminate_distinct_on, +611 _alias_ordered_group, +612 transforms.eliminate_semi_and_anti_joins, +613 ] +614 ), +615 exp.SHA2: lambda self, e: self.func( +616 "SHA256" if e.text("length") == "256" else "SHA512", e.this +617 ), +618 exp.StabilityProperty: lambda self, e: ( +619 "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC" +620 ), +621 exp.StrToDate: lambda self, +622 e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", +623 exp.StrToTime: lambda self, e: self.func( +624 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") +625 ), +626 exp.TimeAdd: date_add_interval_sql("TIME", "ADD"), +627 exp.TimeFromParts: rename_func("TIME"), +628 exp.TimeSub: date_add_interval_sql("TIME", "SUB"), +629 exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"), +630 exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"), +631 exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"), +632 exp.TimeStrToTime: timestrtotime_sql, +633 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), +634 exp.TsOrDsAdd: _ts_or_ds_add_sql, +635 exp.TsOrDsDiff: _ts_or_ds_diff_sql, +636 exp.TsOrDsToTime: rename_func("TIME"), +637 exp.Unhex: rename_func("FROM_HEX"), +638 exp.UnixDate: rename_func("UNIX_DATE"), +639 exp.UnixToTime: _unix_to_time_sql, +640 exp.Values: _derived_table_values_to_unnest, +641 exp.VariancePop: rename_func("VAR_POP"), +642 } +643 +644 SUPPORTED_JSON_PATH_PARTS = { +645 exp.JSONPathKey, +646 exp.JSONPathRoot, +647 exp.JSONPathSubscript, +648 } +649 +650 TYPE_MAPPING = { +651 **generator.Generator.TYPE_MAPPING, +652 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", +653 exp.DataType.Type.BIGINT: "INT64", +654 exp.DataType.Type.BINARY: "BYTES", +655 exp.DataType.Type.BOOLEAN: "BOOL", +656 exp.DataType.Type.CHAR: "STRING", +657 exp.DataType.Type.DECIMAL: "NUMERIC", +658 exp.DataType.Type.DOUBLE: "FLOAT64", +659 exp.DataType.Type.FLOAT: "FLOAT64", +660 exp.DataType.Type.INT: "INT64", +661 exp.DataType.Type.NCHAR: "STRING", +662 exp.DataType.Type.NVARCHAR: "STRING", +663 exp.DataType.Type.SMALLINT: "INT64", +664 exp.DataType.Type.TEXT: "STRING", +665 exp.DataType.Type.TIMESTAMP: "DATETIME", +666 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +667 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +668 exp.DataType.Type.TINYINT: "INT64", +669 exp.DataType.Type.VARBINARY: "BYTES", +670 exp.DataType.Type.VARCHAR: "STRING", +671 exp.DataType.Type.VARIANT: "ANY TYPE", +672 } +673 +674 PROPERTIES_LOCATION = { +675 **generator.Generator.PROPERTIES_LOCATION, +676 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +677 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +678 } +679 +680 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords +681 RESERVED_KEYWORDS = { +682 *generator.Generator.RESERVED_KEYWORDS, +683 "all", +684 "and", +685 "any", +686 "array", +687 "as", +688 "asc", +689 
"assert_rows_modified", +690 "at", +691 "between", +692 "by", +693 "case", +694 "cast", +695 "collate", +696 "contains", +697 "create", +698 "cross", +699 "cube", +700 "current", +701 "default", +702 "define", +703 "desc", +704 "distinct", +705 "else", +706 "end", +707 "enum", +708 "escape", +709 "except", +710 "exclude", +711 "exists", +712 "extract", +713 "false", +714 "fetch", +715 "following", +716 "for", +717 "from", +718 "full", +719 "group", +720 "grouping", +721 "groups", +722 "hash", +723 "having", +724 "if", +725 "ignore", +726 "in", +727 "inner", +728 "intersect", +729 "interval", +730 "into", +731 "is", +732 "join", +733 "lateral", +734 "left", +735 "like", +736 "limit", +737 "lookup", +738 "merge", +739 "natural", +740 "new", +741 "no", +742 "not", +743 "null", +744 "nulls", +745 "of", +746 "on", +747 "or", +748 "order", +749 "outer", +750 "over", +751 "partition", +752 "preceding", +753 "proto", +754 "qualify", +755 "range", +756 "recursive", +757 "respect", +758 "right", +759 "rollup", +760 "rows", +761 "select", +762 "set", +763 "some", +764 "struct", +765 "tablesample", +766 "then", +767 "to", +768 "treat", +769 "true", +770 "unbounded", +771 "union", +772 "unnest", +773 "using", +774 "when", +775 "where", +776 "window", +777 "with", +778 "within", +779 } +780 +781 def timetostr_sql(self, expression: exp.TimeToStr) -> str: +782 if isinstance(expression.this, exp.TsOrDsToDate): +783 this: exp.Expression = expression.this +784 else: +785 this = expression +786 +787 return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})" +788 +789 def struct_sql(self, expression: exp.Struct) -> str: +790 args = [] +791 for expr in expression.expressions: +792 if isinstance(expr, self.KEY_VALUE_DEFINITIONS): +793 arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}" +794 else: +795 arg = self.sql(expr) +796 +797 args.append(arg) +798 +799 return self.func("STRUCT", *args) 800 -801 args.append(arg) -802 -803 return self.func("STRUCT", *args) -804 -805 def eq_sql(self, expression: exp.EQ) -> str: -806 # Operands of = cannot be NULL in BigQuery -807 if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null): -808 if not isinstance(expression.parent, exp.Update): -809 return "NULL" -810 -811 return self.binary(expression, "=") -812 -813 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: -814 parent = expression.parent -815 -816 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). -817 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
-818 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): -819 return self.func( -820 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) -821 ) -822 -823 return super().attimezone_sql(expression) -824 -825 def trycast_sql(self, expression: exp.TryCast) -> str: -826 return self.cast_sql(expression, safe_prefix="SAFE_") -827 -828 def cte_sql(self, expression: exp.CTE) -> str: -829 if expression.alias_column_names: -830 self.unsupported("Column names in CTE definition are not supported.") -831 return super().cte_sql(expression) -832 -833 def array_sql(self, expression: exp.Array) -> str: -834 first_arg = seq_get(expression.expressions, 0) -835 if isinstance(first_arg, exp.Subqueryable): -836 return f"ARRAY{self.wrap(self.sql(first_arg))}" -837 -838 return inline_array_sql(self, expression) +801 def eq_sql(self, expression: exp.EQ) -> str: +802 # Operands of = cannot be NULL in BigQuery +803 if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null): +804 if not isinstance(expression.parent, exp.Update): +805 return "NULL" +806 +807 return self.binary(expression, "=") +808 +809 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: +810 parent = expression.parent +811 +812 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). +813 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. +814 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): +815 return self.func( +816 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) +817 ) +818 +819 return super().attimezone_sql(expression) +820 +821 def trycast_sql(self, expression: exp.TryCast) -> str: +822 return self.cast_sql(expression, safe_prefix="SAFE_") +823 +824 def cte_sql(self, expression: exp.CTE) -> str: +825 if expression.alias_column_names: +826 self.unsupported("Column names in CTE definition are not supported.") +827 return super().cte_sql(expression) +828 +829 def array_sql(self, expression: exp.Array) -> str: +830 first_arg = seq_get(expression.expressions, 0) +831 if isinstance(first_arg, exp.Subqueryable): +832 return f"ARRAY{self.wrap(self.sql(first_arg))}" +833 +834 return inline_array_sql(self, expression) +835 +836 def bracket_sql(self, expression: exp.Bracket) -> str: +837 this = self.sql(expression, "this") +838 expressions = expression.expressions 839 -840 def bracket_sql(self, expression: exp.Bracket) -> str: -841 this = self.sql(expression, "this") -842 expressions = expression.expressions -843 -844 if len(expressions) == 1: -845 arg = expressions[0] -846 if arg.type is None: -847 from sqlglot.optimizer.annotate_types import annotate_types -848 -849 arg = annotate_types(arg) +840 if len(expressions) == 1: +841 arg = expressions[0] +842 if arg.type is None: +843 from sqlglot.optimizer.annotate_types import annotate_types +844 +845 arg = annotate_types(arg) +846 +847 if arg.type and arg.type.this in exp.DataType.TEXT_TYPES: +848 # BQ doesn't support bracket syntax with string values +849 return f"{this}.{arg.name}" 850 -851 if arg.type and arg.type.this in exp.DataType.TEXT_TYPES: -852 # BQ doesn't support bracket syntax with string values -853 return f"{this}.{arg.name}" -854 -855 expressions_sql = ", ".join(self.sql(e) for e in expressions) -856 offset = expression.args.get("offset") -857 -858 if offset == 0: -859 expressions_sql = f"OFFSET({expressions_sql})" -860 elif offset == 1: -861 expressions_sql = f"ORDINAL({expressions_sql})" -862 
elif offset is not None: -863 self.unsupported(f"Unsupported array offset: {offset}") -864 -865 if expression.args.get("safe"): -866 expressions_sql = f"SAFE_{expressions_sql}" -867 -868 return f"{this}[{expressions_sql}]" -869 -870 def transaction_sql(self, *_) -> str: -871 return "BEGIN TRANSACTION" -872 -873 def commit_sql(self, *_) -> str: -874 return "COMMIT TRANSACTION" -875 -876 def rollback_sql(self, *_) -> str: -877 return "ROLLBACK TRANSACTION" -878 -879 def in_unnest_op(self, expression: exp.Unnest) -> str: -880 return self.sql(expression) -881 -882 def except_op(self, expression: exp.Except) -> str: -883 if not expression.args.get("distinct", False): -884 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") -885 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -886 -887 def intersect_op(self, expression: exp.Intersect) -> str: -888 if not expression.args.get("distinct", False): -889 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") -890 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" -891 -892 def with_properties(self, properties: exp.Properties) -> str: -893 return self.properties(properties, prefix=self.seg("OPTIONS")) -894 -895 def version_sql(self, expression: exp.Version) -> str: -896 if expression.name == "TIMESTAMP": -897 expression.set("this", "SYSTEM_TIME") -898 return super().version_sql(expression) +851 expressions_sql = ", ".join(self.sql(e) for e in expressions) +852 offset = expression.args.get("offset") +853 +854 if offset == 0: +855 expressions_sql = f"OFFSET({expressions_sql})" +856 elif offset == 1: +857 expressions_sql = f"ORDINAL({expressions_sql})" +858 elif offset is not None: +859 self.unsupported(f"Unsupported array offset: {offset}") +860 +861 if expression.args.get("safe"): +862 expressions_sql = f"SAFE_{expressions_sql}" +863 +864 return f"{this}[{expressions_sql}]" +865 +866 def transaction_sql(self, *_) -> str: +867 return "BEGIN TRANSACTION" +868 +869 def commit_sql(self, *_) -> str: +870 return "COMMIT TRANSACTION" +871 +872 def rollback_sql(self, *_) -> str: +873 return "ROLLBACK TRANSACTION" +874 +875 def in_unnest_op(self, expression: exp.Unnest) -> str: +876 return self.sql(expression) +877 +878 def except_op(self, expression: exp.Except) -> str: +879 if not expression.args.get("distinct", False): +880 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") +881 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +882 +883 def intersect_op(self, expression: exp.Intersect) -> str: +884 if not expression.args.get("distinct", False): +885 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") +886 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" +887 +888 def with_properties(self, properties: exp.Properties) -> str: +889 return self.properties(properties, prefix=self.seg("OPTIONS")) +890 +891 def version_sql(self, expression: exp.Version) -> str: +892 if expression.name == "TIMESTAMP": +893 expression.set("this", "SYSTEM_TIME") +894 return super().version_sql(expression)
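Not part of the patch itself: a minimal sketch of how the bracket handling in this hunk behaves end to end, assuming a sqlglot build that already contains it. sqlglot.transpile and exp.Bracket are existing APIs; the printed SQL is indicative only.

import sqlglot

# Round-trip a BigQuery array subscript. Per the hunk above, _parse_bracket
# records offset/safe on exp.Bracket, and bracket_sql re-emits
# OFFSET/ORDINAL/SAFE_OFFSET/SAFE_ORDINAL accordingly.
sql = "SELECT arr[SAFE_OFFSET(0)] FROM t"
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
# expected to keep the SAFE_OFFSET(...) form rather than a bare arr[0]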
    @@ -2097,7 +2086,7 @@ If empty, the corresponding trie will be constructed off of
    PSEUDOCOLUMNS: Set[str] = -{'_PARTITIONTIME', '_PARTITIONDATE'} +{'_PARTITIONDATE', '_PARTITIONTIME'}
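Aside (not from the patch): the only change in this hunk is the rendered ordering of the PSEUDOCOLUMNS set; its members are unchanged. A minimal sketch of what these names mean in practice, using existing sqlglot APIs:

import sqlglot
from sqlglot import exp

# _PARTITIONTIME and _PARTITIONDATE parse as ordinary column references; the
# PSEUDOCOLUMNS set is what lets later steps such as column qualification
# treat them as BigQuery pseudocolumns rather than unknown schema columns.
tree = sqlglot.parse_one(
    "SELECT _PARTITIONTIME FROM ds.t WHERE _PARTITIONDATE = '2024-01-01'",
    read="bigquery",
)
print(sorted(c.name for c in tree.find_all(exp.Column)))
# expected to contain '_PARTITIONDATE' and '_PARTITIONTIME'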
    @@ -2482,6 +2471,7 @@ that it can analyze queries in the optimizer and successfully capture their sema
    332 "TIMESTAMP": TokenType.TIMESTAMPTZ, 333 } 334 KEYWORDS.pop("DIV") +335 KEYWORDS.pop("VALUES")
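Dropping VALUES from the BigQuery tokenizer keywords (paired with removing the extra VALUES entry from ID_VAR_TOKENS in the parser hunk further down) means an identifier literally named values no longer collides with the VALUES token. A hedged sketch, not part of the patch:

import sqlglot

# With this change, "values" should tokenize as a plain identifier for the
# bigquery dialect, so both of these are expected to parse and round-trip.
print(sqlglot.transpile("SELECT values FROM t", read="bigquery", write="bigquery")[0])
print(sqlglot.transpile("SELECT * FROM values", read="bigquery", write="bigquery")[0])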
    @@ -2577,7 +2567,7 @@ that it can analyze queries in the optimizer and successfully capture their sema
    KEYWORDS = - {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING 
SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': 
<TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 
'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>} + {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': 
<TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 
'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': 
<TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>}
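The long before/after rendering above differs only in that the 'VALUES' entry is gone from the keyword table, matching the KEYWORDS.pop("VALUES") shown earlier. A small inspection sketch (assuming the class layout documented here), not part of the patch:

from sqlglot.dialects.bigquery import BigQuery

# The tokenizer keyword table is a plain dict on the dialect's Tokenizer class.
print("VALUES" in BigQuery.Tokenizer.KEYWORDS)  # expected: False with this patch
print("DIV" in BigQuery.Tokenizer.KEYWORDS)     # expected: False (popped earlier)
print(BigQuery.Tokenizer.KEYWORDS["BYTES"])     # per the table above: TokenType.BINARY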
    @@ -2627,210 +2617,205 @@ that it can analyze queries in the optimizer and successfully capture their sema
    -
    336    class Parser(parser.Parser):
    -337        PREFIXED_PIVOT_COLUMNS = True
    -338
    -339        LOG_DEFAULTS_TO_LN = True
    -340
    -341        FUNCTIONS = {
    -342            **parser.Parser.FUNCTIONS,
    -343            "DATE": _parse_date,
    -344            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    -345            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    -346            "DATE_TRUNC": lambda args: exp.DateTrunc(
    -347                unit=exp.Literal.string(str(seq_get(args, 1))),
    -348                this=seq_get(args, 0),
    -349            ),
    -350            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
    -351            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
    -352            "DIV": binary_from_function(exp.IntDiv),
    -353            "FORMAT_DATE": lambda args: exp.TimeToStr(
    -354                this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0)
    -355            ),
    -356            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
    -357            "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar(
    -358                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$")
    -359            ),
    -360            "MD5": exp.MD5Digest.from_arg_list,
    -361            "TO_HEX": _parse_to_hex,
    -362            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
    -363                [seq_get(args, 1), seq_get(args, 0)]
    -364            ),
    -365            "PARSE_TIMESTAMP": _parse_parse_timestamp,
    -366            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
    -367            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    -368                this=seq_get(args, 0),
    -369                expression=seq_get(args, 1),
    -370                position=seq_get(args, 2),
    -371                occurrence=seq_get(args, 3),
    -372                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
    -373            ),
    -374            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
    -375            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
    -376            "SPLIT": lambda args: exp.Split(
    -377                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
    -378                this=seq_get(args, 0),
    -379                expression=seq_get(args, 1) or exp.Literal.string(","),
    -380            ),
    -381            "TIME": _parse_time,
    -382            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
    -383            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
    -384            "TIMESTAMP": _parse_timestamp,
    -385            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
    -386            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
    -387            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
    -388                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
    -389            ),
    -390            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
    -391                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
    -392            ),
    -393            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
    -394            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    -395        }
    -396
    -397        FUNCTION_PARSERS = {
    -398            **parser.Parser.FUNCTION_PARSERS,
    -399            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    -400        }
    -401        FUNCTION_PARSERS.pop("TRIM")
    -402
    -403        NO_PAREN_FUNCTIONS = {
    -404            **parser.Parser.NO_PAREN_FUNCTIONS,
    -405            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    -406        }
    -407
    -408        NESTED_TYPE_TOKENS = {
    -409            *parser.Parser.NESTED_TYPE_TOKENS,
    -410            TokenType.TABLE,
    -411        }
    -412
    -413        ID_VAR_TOKENS = {
    -414            *parser.Parser.ID_VAR_TOKENS,
    -415            TokenType.VALUES,
    -416        }
    -417
    -418        PROPERTY_PARSERS = {
    -419            **parser.Parser.PROPERTY_PARSERS,
    -420            "NOT DETERMINISTIC": lambda self: self.expression(
    -421                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
    -422            ),
    -423            "OPTIONS": lambda self: self._parse_with_property(),
    -424        }
    -425
    -426        CONSTRAINT_PARSERS = {
    -427            **parser.Parser.CONSTRAINT_PARSERS,
    -428            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    -429        }
    -430
    -431        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
    -432        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)
    -433
    -434        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}
    -435
    -436        STATEMENT_PARSERS = {
    -437            **parser.Parser.STATEMENT_PARSERS,
    -438            TokenType.END: lambda self: self._parse_as_command(self._prev),
    -439            TokenType.FOR: lambda self: self._parse_for_in(),
    -440        }
    -441
    -442        BRACKET_OFFSETS = {
    -443            "OFFSET": (0, False),
    -444            "ORDINAL": (1, False),
    -445            "SAFE_OFFSET": (0, True),
    -446            "SAFE_ORDINAL": (1, True),
    -447        }
    -448
    -449        def _parse_for_in(self) -> exp.ForIn:
    -450            this = self._parse_range()
    -451            self._match_text_seq("DO")
    -452            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
    -453
    -454        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
    -455            this = super()._parse_table_part(schema=schema) or self._parse_number()
    -456
    -457            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
    -458            if isinstance(this, exp.Identifier):
    -459                table_name = this.name
    -460                while self._match(TokenType.DASH, advance=False) and self._next:
    -461                    self._advance(2)
    -462                    table_name += f"-{self._prev.text}"
    +            
    337    class Parser(parser.Parser):
    +338        PREFIXED_PIVOT_COLUMNS = True
    +339
    +340        LOG_DEFAULTS_TO_LN = True
    +341
    +342        FUNCTIONS = {
    +343            **parser.Parser.FUNCTIONS,
    +344            "DATE": _parse_date,
    +345            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
    +346            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
    +347            "DATE_TRUNC": lambda args: exp.DateTrunc(
    +348                unit=exp.Literal.string(str(seq_get(args, 1))),
    +349                this=seq_get(args, 0),
    +350            ),
    +351            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
    +352            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
    +353            "DIV": binary_from_function(exp.IntDiv),
    +354            "FORMAT_DATE": lambda args: exp.TimeToStr(
    +355                this=exp.TsOrDsToDate(this=seq_get(args, 1)), format=seq_get(args, 0)
    +356            ),
    +357            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
    +358            "JSON_EXTRACT_SCALAR": lambda args: exp.JSONExtractScalar(
    +359                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string("$")
    +360            ),
    +361            "MD5": exp.MD5Digest.from_arg_list,
    +362            "TO_HEX": _parse_to_hex,
    +363            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
    +364                [seq_get(args, 1), seq_get(args, 0)]
    +365            ),
    +366            "PARSE_TIMESTAMP": _parse_parse_timestamp,
    +367            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
    +368            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    +369                this=seq_get(args, 0),
    +370                expression=seq_get(args, 1),
    +371                position=seq_get(args, 2),
    +372                occurrence=seq_get(args, 3),
    +373                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
    +374            ),
    +375            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
    +376            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
    +377            "SPLIT": lambda args: exp.Split(
    +378                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
    +379                this=seq_get(args, 0),
    +380                expression=seq_get(args, 1) or exp.Literal.string(","),
    +381            ),
    +382            "TIME": _parse_time,
    +383            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
    +384            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
    +385            "TIMESTAMP": _parse_timestamp,
    +386            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
    +387            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
    +388            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
    +389                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
    +390            ),
    +391            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
    +392                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
    +393            ),
    +394            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
    +395            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    +396        }
    +397
    +398        FUNCTION_PARSERS = {
    +399            **parser.Parser.FUNCTION_PARSERS,
    +400            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    +401        }
    +402        FUNCTION_PARSERS.pop("TRIM")
    +403
    +404        NO_PAREN_FUNCTIONS = {
    +405            **parser.Parser.NO_PAREN_FUNCTIONS,
    +406            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    +407        }
    +408
    +409        NESTED_TYPE_TOKENS = {
    +410            *parser.Parser.NESTED_TYPE_TOKENS,
    +411            TokenType.TABLE,
    +412        }
    +413
    +414        PROPERTY_PARSERS = {
    +415            **parser.Parser.PROPERTY_PARSERS,
    +416            "NOT DETERMINISTIC": lambda self: self.expression(
    +417                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
    +418            ),
    +419            "OPTIONS": lambda self: self._parse_with_property(),
    +420        }
    +421
    +422        CONSTRAINT_PARSERS = {
    +423            **parser.Parser.CONSTRAINT_PARSERS,
    +424            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    +425        }
    +426
    +427        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
    +428        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)
    +429
    +430        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}
    +431
    +432        STATEMENT_PARSERS = {
    +433            **parser.Parser.STATEMENT_PARSERS,
    +434            TokenType.END: lambda self: self._parse_as_command(self._prev),
    +435            TokenType.FOR: lambda self: self._parse_for_in(),
    +436        }
    +437
    +438        BRACKET_OFFSETS = {
    +439            "OFFSET": (0, False),
    +440            "ORDINAL": (1, False),
    +441            "SAFE_OFFSET": (0, True),
    +442            "SAFE_ORDINAL": (1, True),
    +443        }
    +444
    +445        def _parse_for_in(self) -> exp.ForIn:
    +446            this = self._parse_range()
    +447            self._match_text_seq("DO")
    +448            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
    +449
    +450        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
    +451            this = super()._parse_table_part(schema=schema) or self._parse_number()
    +452
    +453            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
    +454            if isinstance(this, exp.Identifier):
    +455                table_name = this.name
    +456                while self._match(TokenType.DASH, advance=False) and self._next:
    +457                    self._advance(2)
    +458                    table_name += f"-{self._prev.text}"
    +459
    +460                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
    +461            elif isinstance(this, exp.Literal):
    +462                table_name = this.name
     463
    -464                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
    -465            elif isinstance(this, exp.Literal):
    -466                table_name = this.name
    -467
    -468                if self._is_connected() and self._parse_var(any_token=True):
    -469                    table_name += self._prev.text
    +464                if self._is_connected() and self._parse_var(any_token=True):
    +465                    table_name += self._prev.text
    +466
    +467                this = exp.Identifier(this=table_name, quoted=True)
    +468
    +469            return this
     470
    -471                this = exp.Identifier(this=table_name, quoted=True)
    -472
    -473            return this
    -474
    -475        def _parse_table_parts(
    -476            self, schema: bool = False, is_db_reference: bool = False
    -477        ) -> exp.Table:
    -478            table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
    -479            if isinstance(table.this, exp.Identifier) and "." in table.name:
    -480                catalog, db, this, *rest = (
    -481                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
    -482                    for x in split_num_words(table.name, ".", 3)
    -483                )
    -484
    -485                if rest and this:
    -486                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
    +471        def _parse_table_parts(
    +472            self, schema: bool = False, is_db_reference: bool = False
    +473        ) -> exp.Table:
    +474            table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
    +475            if isinstance(table.this, exp.Identifier) and "." in table.name:
    +476                catalog, db, this, *rest = (
    +477                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
    +478                    for x in split_num_words(table.name, ".", 3)
    +479                )
    +480
    +481                if rest and this:
    +482                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
    +483
    +484                table = exp.Table(this=this, db=db, catalog=catalog)
    +485
    +486            return table
     487
    -488                table = exp.Table(this=this, db=db, catalog=catalog)
    -489
    -490            return table
    +488        @t.overload
    +489        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject:
    +490            ...
     491
     492        @t.overload
    -493        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject:
    +493        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg:
     494            ...
     495
    -496        @t.overload
    -497        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg:
    -498            ...
    +496        def _parse_json_object(self, agg=False):
    +497            json_object = super()._parse_json_object()
    +498            array_kv_pair = seq_get(json_object.expressions, 0)
     499
    -500        def _parse_json_object(self, agg=False):
    -501            json_object = super()._parse_json_object()
    -502            array_kv_pair = seq_get(json_object.expressions, 0)
    -503
    -504            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
    -505            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
    -506            if (
    -507                array_kv_pair
    -508                and isinstance(array_kv_pair.this, exp.Array)
    -509                and isinstance(array_kv_pair.expression, exp.Array)
    -510            ):
    -511                keys = array_kv_pair.this.expressions
    -512                values = array_kv_pair.expression.expressions
    -513
    -514                json_object.set(
    -515                    "expressions",
    -516                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
    -517                )
    -518
    -519            return json_object
    -520
    -521        def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    -522            bracket = super()._parse_bracket(this)
    -523
    -524            if this is bracket:
    -525                return bracket
    +500            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
    +501            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
    +502            if (
    +503                array_kv_pair
    +504                and isinstance(array_kv_pair.this, exp.Array)
    +505                and isinstance(array_kv_pair.expression, exp.Array)
    +506            ):
    +507                keys = array_kv_pair.this.expressions
    +508                values = array_kv_pair.expression.expressions
    +509
    +510                json_object.set(
    +511                    "expressions",
    +512                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
    +513                )
    +514
    +515            return json_object
    +516
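A small usage sketch (not part of the patch) of the JSON_OBJECT "signature 2" conversion implemented just above; assuming a recent sqlglot release, the parallel key/value arrays should come back as one JSONKeyValue per pair, though the exact reprs may differ between versions.

    import sqlglot
    from sqlglot import exp

    # BigQuery signature 2: JSON_OBJECT(['a', 'b'], [1, 2]) is normalized into the
    # same pairwise shape that signature 1, JSON_OBJECT('a', 1, 'b', 2), produces.
    obj = sqlglot.parse_one("SELECT JSON_OBJECT(['a', 'b'], [1, 2])", read="bigquery").find(exp.JSONObject)
    for kv in obj.expressions:  # one exp.JSONKeyValue per zipped key/value
        print(kv.this.sql(), "->", kv.expression.sql())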
    +517        def _parse_bracket(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    +518            bracket = super()._parse_bracket(this)
    +519
    +520            if this is bracket:
    +521                return bracket
    +522
    +523            if isinstance(bracket, exp.Bracket):
    +524                for expression in bracket.expressions:
    +525                    name = expression.name.upper()
     526
    -527            if isinstance(bracket, exp.Bracket):
    -528                for expression in bracket.expressions:
    -529                    name = expression.name.upper()
    -530
    -531                    if name not in self.BRACKET_OFFSETS:
    -532                        break
    -533
    -534                    offset, safe = self.BRACKET_OFFSETS[name]
    -535                    bracket.set("offset", offset)
    -536                    bracket.set("safe", safe)
    -537                    expression.replace(expression.expressions[0])
    -538
    -539            return bracket
    +527                    if name not in self.BRACKET_OFFSETS:
    +528                        break
    +529
    +530                    offset, safe = self.BRACKET_OFFSETS[name]
    +531                    bracket.set("offset", offset)
    +532                    bracket.set("safe", safe)
    +533                    expression.replace(expression.expressions[0])
    +534
    +535            return bracket
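For reference, a minimal sketch of how the FUNCTIONS and BRACKET_OFFSETS rules in the Parser hunk above surface through sqlglot's public API; the printed reprs are illustrative and can differ between releases.

    import sqlglot
    from sqlglot import exp

    ast = sqlglot.parse_one("SELECT SHA256(col), TIMESTAMP_MICROS(1)", read="bigquery")
    print(ast.find(exp.SHA2))        # SHA256(...) is parsed as exp.SHA2 with length=256
    print(ast.find(exp.UnixToTime))  # TIMESTAMP_MICROS(...) becomes exp.UnixToTime at MICROS scale

    # OFFSET/ORDINAL/SAFE_OFFSET/SAFE_ORDINAL subscripts set the "offset" and "safe"
    # args on the resulting exp.Bracket node (see _parse_bracket above).
    bracket = sqlglot.parse_one("SELECT arr[SAFE_OFFSET(0)] FROM t", read="bigquery").find(exp.Bracket)
    print(bracket.args.get("offset"), bracket.args.get("safe"))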
     
    @@ -2918,7 +2903,7 @@ Default: 3
    NESTED_TYPE_TOKENS = - {<TokenType.ARRAY: 'ARRAY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.NULLABLE: 'NULLABLE'>} + {<TokenType.TABLE: 'TABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MAP: 'MAP'>, <TokenType.NESTED: 'NESTED'>}
    @@ -2926,19 +2911,6 @@ Default: 3 -
    -
    -
    - ID_VAR_TOKENS = - - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.VALUES: 'VALUES'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.ANTI: 'ANTI'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.FULL: 'FULL'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OFFSET: 
'OFFSET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LEFT: 'LEFT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.APPLY: 'APPLY'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} - - -
    @@ -3020,7 +2992,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -3067,6 +3039,7 @@ Default: 3
    RESERVED_TOKENS
    DB_CREATABLES
    CREATABLES
    +
    ID_VAR_TOKENS
    INTERVAL_VARS
    COMMENT_TABLE_ALIAS_TOKENS
    UPDATE_ALIAS_TOKENS
    @@ -3130,6 +3103,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
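Before the Generator hunk below, a brief sketch of what its TYPE_MAPPING and TRANSFORMS produce when targeting BigQuery; the exact output strings are illustrative and may vary slightly by sqlglot version.

    import sqlglot

    # TYPE_MAPPING: VARCHAR -> STRING, TINYINT -> INT64, etc.
    print(sqlglot.transpile("SELECT CAST(x AS VARCHAR), CAST(y AS TINYINT)", write="bigquery")[0])

    # TRANSFORMS: exp.ILike is lowered via no_ilike_sql and exp.VariancePop is renamed to VAR_POP.
    print(sqlglot.transpile("SELECT x ILIKE '%y%', VARIANCE_POP(z) FROM t", write="bigquery")[0])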
    @@ -3159,364 +3133,364 @@ Default: 3
    -
    541    class Generator(generator.Generator):
    -542        EXPLICIT_UNION = True
    -543        INTERVAL_ALLOWS_PLURAL_FORM = False
    -544        JOIN_HINTS = False
    -545        QUERY_HINTS = False
    -546        TABLE_HINTS = False
    -547        LIMIT_FETCH = "LIMIT"
    -548        RENAME_TABLE_WITH_DB = False
    -549        NVL2_SUPPORTED = False
    -550        UNNEST_WITH_ORDINALITY = False
    -551        COLLATE_IS_FUNC = True
    -552        LIMIT_ONLY_LITERALS = True
    -553        SUPPORTS_TABLE_ALIAS_COLUMNS = False
    -554        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
    -555        JSON_KEY_VALUE_PAIR_SEP = ","
    -556        NULL_ORDERING_SUPPORTED = False
    -557        IGNORE_NULLS_IN_FUNC = True
    -558        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
    -559
    -560        TRANSFORMS = {
    -561            **generator.Generator.TRANSFORMS,
    -562            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
    -563            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
    -564            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
    -565            exp.ArrayContains: _array_contains_sql,
    -566            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    -567            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
    -568            exp.CollateProperty: lambda self, e: (
    -569                f"DEFAULT COLLATE {self.sql(e, 'this')}"
    -570                if e.args.get("default")
    -571                else f"COLLATE {self.sql(e, 'this')}"
    -572            ),
    -573            exp.CountIf: rename_func("COUNTIF"),
    -574            exp.Create: _create_sql,
    -575            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
    -576            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
    -577            exp.DateDiff: lambda self,
    -578            e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
    -579            exp.DateFromParts: rename_func("DATE"),
    -580            exp.DateStrToDate: datestrtodate_sql,
    -581            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
    -582            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
    -583            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
    -584            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
    -585            exp.FromTimeZone: lambda self, e: self.func(
    -586                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
    -587            ),
    -588            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
    -589            exp.GroupConcat: rename_func("STRING_AGG"),
    -590            exp.Hex: rename_func("TO_HEX"),
    -591            exp.If: if_sql(false_value="NULL"),
    -592            exp.ILike: no_ilike_sql,
    -593            exp.IntDiv: rename_func("DIV"),
    -594            exp.JSONFormat: rename_func("TO_JSON_STRING"),
    -595            exp.Max: max_or_greatest,
    -596            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
    -597            exp.MD5Digest: rename_func("MD5"),
    -598            exp.Min: min_or_least,
    -599            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    -600            exp.RegexpExtract: lambda self, e: self.func(
    -601                "REGEXP_EXTRACT",
    -602                e.this,
    -603                e.expression,
    -604                e.args.get("position"),
    -605                e.args.get("occurrence"),
    -606            ),
    -607            exp.RegexpReplace: regexp_replace_sql,
    -608            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
    -609            exp.ReturnsProperty: _returnsproperty_sql,
    -610            exp.Select: transforms.preprocess(
    -611                [
    -612                    transforms.explode_to_unnest(),
    -613                    _unqualify_unnest,
    -614                    transforms.eliminate_distinct_on,
    -615                    _alias_ordered_group,
    -616                    transforms.eliminate_semi_and_anti_joins,
    -617                ]
    -618            ),
    -619            exp.SHA2: lambda self, e: self.func(
    -620                "SHA256" if e.text("length") == "256" else "SHA512", e.this
    -621            ),
    -622            exp.StabilityProperty: lambda self, e: (
    -623                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
    -624            ),
    -625            exp.StrToDate: lambda self,
    -626            e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
    -627            exp.StrToTime: lambda self, e: self.func(
    -628                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
    -629            ),
    -630            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
    -631            exp.TimeFromParts: rename_func("TIME"),
    -632            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
    -633            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
    -634            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
    -635            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
    -636            exp.TimeStrToTime: timestrtotime_sql,
    -637            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    -638            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -639            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
    -640            exp.TsOrDsToTime: rename_func("TIME"),
    -641            exp.Unhex: rename_func("FROM_HEX"),
    -642            exp.UnixDate: rename_func("UNIX_DATE"),
    -643            exp.UnixToTime: _unix_to_time_sql,
    -644            exp.Values: _derived_table_values_to_unnest,
    -645            exp.VariancePop: rename_func("VAR_POP"),
    -646        }
    -647
    -648        SUPPORTED_JSON_PATH_PARTS = {
    -649            exp.JSONPathKey,
    -650            exp.JSONPathRoot,
    -651            exp.JSONPathSubscript,
    -652        }
    -653
    -654        TYPE_MAPPING = {
    -655            **generator.Generator.TYPE_MAPPING,
    -656            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
    -657            exp.DataType.Type.BIGINT: "INT64",
    -658            exp.DataType.Type.BINARY: "BYTES",
    -659            exp.DataType.Type.BOOLEAN: "BOOL",
    -660            exp.DataType.Type.CHAR: "STRING",
    -661            exp.DataType.Type.DECIMAL: "NUMERIC",
    -662            exp.DataType.Type.DOUBLE: "FLOAT64",
    -663            exp.DataType.Type.FLOAT: "FLOAT64",
    -664            exp.DataType.Type.INT: "INT64",
    -665            exp.DataType.Type.NCHAR: "STRING",
    -666            exp.DataType.Type.NVARCHAR: "STRING",
    -667            exp.DataType.Type.SMALLINT: "INT64",
    -668            exp.DataType.Type.TEXT: "STRING",
    -669            exp.DataType.Type.TIMESTAMP: "DATETIME",
    -670            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -671            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    -672            exp.DataType.Type.TINYINT: "INT64",
    -673            exp.DataType.Type.VARBINARY: "BYTES",
    -674            exp.DataType.Type.VARCHAR: "STRING",
    -675            exp.DataType.Type.VARIANT: "ANY TYPE",
    -676        }
    -677
    -678        PROPERTIES_LOCATION = {
    -679            **generator.Generator.PROPERTIES_LOCATION,
    -680            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -681            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -682        }
    -683
    -684        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    -685        RESERVED_KEYWORDS = {
    -686            *generator.Generator.RESERVED_KEYWORDS,
    -687            "all",
    -688            "and",
    -689            "any",
    -690            "array",
    -691            "as",
    -692            "asc",
    -693            "assert_rows_modified",
    -694            "at",
    -695            "between",
    -696            "by",
    -697            "case",
    -698            "cast",
    -699            "collate",
    -700            "contains",
    -701            "create",
    -702            "cross",
    -703            "cube",
    -704            "current",
    -705            "default",
    -706            "define",
    -707            "desc",
    -708            "distinct",
    -709            "else",
    -710            "end",
    -711            "enum",
    -712            "escape",
    -713            "except",
    -714            "exclude",
    -715            "exists",
    -716            "extract",
    -717            "false",
    -718            "fetch",
    -719            "following",
    -720            "for",
    -721            "from",
    -722            "full",
    -723            "group",
    -724            "grouping",
    -725            "groups",
    -726            "hash",
    -727            "having",
    -728            "if",
    -729            "ignore",
    -730            "in",
    -731            "inner",
    -732            "intersect",
    -733            "interval",
    -734            "into",
    -735            "is",
    -736            "join",
    -737            "lateral",
    -738            "left",
    -739            "like",
    -740            "limit",
    -741            "lookup",
    -742            "merge",
    -743            "natural",
    -744            "new",
    -745            "no",
    -746            "not",
    -747            "null",
    -748            "nulls",
    -749            "of",
    -750            "on",
    -751            "or",
    -752            "order",
    -753            "outer",
    -754            "over",
    -755            "partition",
    -756            "preceding",
    -757            "proto",
    -758            "qualify",
    -759            "range",
    -760            "recursive",
    -761            "respect",
    -762            "right",
    -763            "rollup",
    -764            "rows",
    -765            "select",
    -766            "set",
    -767            "some",
    -768            "struct",
    -769            "tablesample",
    -770            "then",
    -771            "to",
    -772            "treat",
    -773            "true",
    -774            "unbounded",
    -775            "union",
    -776            "unnest",
    -777            "using",
    -778            "when",
    -779            "where",
    -780            "window",
    -781            "with",
    -782            "within",
    -783        }
    -784
    -785        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    -786            if isinstance(expression.this, exp.TsOrDsToDate):
    -787                this: exp.Expression = expression.this
    -788            else:
    -789                this = expression
    -790
    -791            return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})"
    -792
    -793        def struct_sql(self, expression: exp.Struct) -> str:
    -794            args = []
    -795            for expr in expression.expressions:
    -796                if isinstance(expr, self.KEY_VALUE_DEFINITIONS):
    -797                    arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}"
    -798                else:
    -799                    arg = self.sql(expr)
    +            
    537    class Generator(generator.Generator):
    +538        EXPLICIT_UNION = True
    +539        INTERVAL_ALLOWS_PLURAL_FORM = False
    +540        JOIN_HINTS = False
    +541        QUERY_HINTS = False
    +542        TABLE_HINTS = False
    +543        LIMIT_FETCH = "LIMIT"
    +544        RENAME_TABLE_WITH_DB = False
    +545        NVL2_SUPPORTED = False
    +546        UNNEST_WITH_ORDINALITY = False
    +547        COLLATE_IS_FUNC = True
    +548        LIMIT_ONLY_LITERALS = True
    +549        SUPPORTS_TABLE_ALIAS_COLUMNS = False
    +550        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
    +551        JSON_KEY_VALUE_PAIR_SEP = ","
    +552        NULL_ORDERING_SUPPORTED = False
    +553        IGNORE_NULLS_IN_FUNC = True
    +554        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
    +555
    +556        TRANSFORMS = {
    +557            **generator.Generator.TRANSFORMS,
    +558            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
    +559            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
    +560            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
    +561            exp.ArrayContains: _array_contains_sql,
    +562            exp.ArraySize: rename_func("ARRAY_LENGTH"),
    +563            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
    +564            exp.CollateProperty: lambda self, e: (
    +565                f"DEFAULT COLLATE {self.sql(e, 'this')}"
    +566                if e.args.get("default")
    +567                else f"COLLATE {self.sql(e, 'this')}"
    +568            ),
    +569            exp.CountIf: rename_func("COUNTIF"),
    +570            exp.Create: _create_sql,
    +571            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
    +572            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
    +573            exp.DateDiff: lambda self,
    +574            e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
    +575            exp.DateFromParts: rename_func("DATE"),
    +576            exp.DateStrToDate: datestrtodate_sql,
    +577            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
    +578            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
    +579            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
    +580            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
    +581            exp.FromTimeZone: lambda self, e: self.func(
    +582                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
    +583            ),
    +584            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
    +585            exp.GroupConcat: rename_func("STRING_AGG"),
    +586            exp.Hex: rename_func("TO_HEX"),
    +587            exp.If: if_sql(false_value="NULL"),
    +588            exp.ILike: no_ilike_sql,
    +589            exp.IntDiv: rename_func("DIV"),
    +590            exp.JSONFormat: rename_func("TO_JSON_STRING"),
    +591            exp.Max: max_or_greatest,
    +592            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
    +593            exp.MD5Digest: rename_func("MD5"),
    +594            exp.Min: min_or_least,
    +595            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    +596            exp.RegexpExtract: lambda self, e: self.func(
    +597                "REGEXP_EXTRACT",
    +598                e.this,
    +599                e.expression,
    +600                e.args.get("position"),
    +601                e.args.get("occurrence"),
    +602            ),
    +603            exp.RegexpReplace: regexp_replace_sql,
    +604            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
    +605            exp.ReturnsProperty: _returnsproperty_sql,
    +606            exp.Select: transforms.preprocess(
    +607                [
    +608                    transforms.explode_to_unnest(),
    +609                    _unqualify_unnest,
    +610                    transforms.eliminate_distinct_on,
    +611                    _alias_ordered_group,
    +612                    transforms.eliminate_semi_and_anti_joins,
    +613                ]
    +614            ),
    +615            exp.SHA2: lambda self, e: self.func(
    +616                "SHA256" if e.text("length") == "256" else "SHA512", e.this
    +617            ),
    +618            exp.StabilityProperty: lambda self, e: (
    +619                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
    +620            ),
    +621            exp.StrToDate: lambda self,
    +622            e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
    +623            exp.StrToTime: lambda self, e: self.func(
    +624                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
    +625            ),
    +626            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
    +627            exp.TimeFromParts: rename_func("TIME"),
    +628            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
    +629            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
    +630            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
    +631            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
    +632            exp.TimeStrToTime: timestrtotime_sql,
    +633            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
    +634            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +635            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
    +636            exp.TsOrDsToTime: rename_func("TIME"),
    +637            exp.Unhex: rename_func("FROM_HEX"),
    +638            exp.UnixDate: rename_func("UNIX_DATE"),
    +639            exp.UnixToTime: _unix_to_time_sql,
    +640            exp.Values: _derived_table_values_to_unnest,
    +641            exp.VariancePop: rename_func("VAR_POP"),
    +642        }
    +643
    +644        SUPPORTED_JSON_PATH_PARTS = {
    +645            exp.JSONPathKey,
    +646            exp.JSONPathRoot,
    +647            exp.JSONPathSubscript,
    +648        }
    +649
    +650        TYPE_MAPPING = {
    +651            **generator.Generator.TYPE_MAPPING,
    +652            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
    +653            exp.DataType.Type.BIGINT: "INT64",
    +654            exp.DataType.Type.BINARY: "BYTES",
    +655            exp.DataType.Type.BOOLEAN: "BOOL",
    +656            exp.DataType.Type.CHAR: "STRING",
    +657            exp.DataType.Type.DECIMAL: "NUMERIC",
    +658            exp.DataType.Type.DOUBLE: "FLOAT64",
    +659            exp.DataType.Type.FLOAT: "FLOAT64",
    +660            exp.DataType.Type.INT: "INT64",
    +661            exp.DataType.Type.NCHAR: "STRING",
    +662            exp.DataType.Type.NVARCHAR: "STRING",
    +663            exp.DataType.Type.SMALLINT: "INT64",
    +664            exp.DataType.Type.TEXT: "STRING",
    +665            exp.DataType.Type.TIMESTAMP: "DATETIME",
    +666            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +667            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    +668            exp.DataType.Type.TINYINT: "INT64",
    +669            exp.DataType.Type.VARBINARY: "BYTES",
    +670            exp.DataType.Type.VARCHAR: "STRING",
    +671            exp.DataType.Type.VARIANT: "ANY TYPE",
    +672        }
    +673
    +674        PROPERTIES_LOCATION = {
    +675            **generator.Generator.PROPERTIES_LOCATION,
    +676            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +677            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +678        }
    +679
    +680        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    +681        RESERVED_KEYWORDS = {
    +682            *generator.Generator.RESERVED_KEYWORDS,
    +683            "all",
    +684            "and",
    +685            "any",
    +686            "array",
    +687            "as",
    +688            "asc",
    +689            "assert_rows_modified",
    +690            "at",
    +691            "between",
    +692            "by",
    +693            "case",
    +694            "cast",
    +695            "collate",
    +696            "contains",
    +697            "create",
    +698            "cross",
    +699            "cube",
    +700            "current",
    +701            "default",
    +702            "define",
    +703            "desc",
    +704            "distinct",
    +705            "else",
    +706            "end",
    +707            "enum",
    +708            "escape",
    +709            "except",
    +710            "exclude",
    +711            "exists",
    +712            "extract",
    +713            "false",
    +714            "fetch",
    +715            "following",
    +716            "for",
    +717            "from",
    +718            "full",
    +719            "group",
    +720            "grouping",
    +721            "groups",
    +722            "hash",
    +723            "having",
    +724            "if",
    +725            "ignore",
    +726            "in",
    +727            "inner",
    +728            "intersect",
    +729            "interval",
    +730            "into",
    +731            "is",
    +732            "join",
    +733            "lateral",
    +734            "left",
    +735            "like",
    +736            "limit",
    +737            "lookup",
    +738            "merge",
    +739            "natural",
    +740            "new",
    +741            "no",
    +742            "not",
    +743            "null",
    +744            "nulls",
    +745            "of",
    +746            "on",
    +747            "or",
    +748            "order",
    +749            "outer",
    +750            "over",
    +751            "partition",
    +752            "preceding",
    +753            "proto",
    +754            "qualify",
    +755            "range",
    +756            "recursive",
    +757            "respect",
    +758            "right",
    +759            "rollup",
    +760            "rows",
    +761            "select",
    +762            "set",
    +763            "some",
    +764            "struct",
    +765            "tablesample",
    +766            "then",
    +767            "to",
    +768            "treat",
    +769            "true",
    +770            "unbounded",
    +771            "union",
    +772            "unnest",
    +773            "using",
    +774            "when",
    +775            "where",
    +776            "window",
    +777            "with",
    +778            "within",
    +779        }
    +780
    +781        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    +782            if isinstance(expression.this, exp.TsOrDsToDate):
    +783                this: exp.Expression = expression.this
    +784            else:
    +785                this = expression
    +786
    +787            return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})"
    +788
    +789        def struct_sql(self, expression: exp.Struct) -> str:
    +790            args = []
    +791            for expr in expression.expressions:
    +792                if isinstance(expr, self.KEY_VALUE_DEFINITIONS):
    +793                    arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}"
    +794                else:
    +795                    arg = self.sql(expr)
    +796
    +797                args.append(arg)
    +798
    +799            return self.func("STRUCT", *args)
     800
    -801                args.append(arg)
    -802
    -803            return self.func("STRUCT", *args)
    -804
    -805        def eq_sql(self, expression: exp.EQ) -> str:
    -806            # Operands of = cannot be NULL in BigQuery
    -807            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
    -808                if not isinstance(expression.parent, exp.Update):
    -809                    return "NULL"
    -810
    -811            return self.binary(expression, "=")
    -812
    -813        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    -814            parent = expression.parent
    -815
    -816            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    -817            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    -818            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
    -819                return self.func(
    -820                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
    -821                )
    -822
    -823            return super().attimezone_sql(expression)
    -824
    -825        def trycast_sql(self, expression: exp.TryCast) -> str:
    -826            return self.cast_sql(expression, safe_prefix="SAFE_")
    -827
    -828        def cte_sql(self, expression: exp.CTE) -> str:
    -829            if expression.alias_column_names:
    -830                self.unsupported("Column names in CTE definition are not supported.")
    -831            return super().cte_sql(expression)
    -832
    -833        def array_sql(self, expression: exp.Array) -> str:
    -834            first_arg = seq_get(expression.expressions, 0)
    -835            if isinstance(first_arg, exp.Subqueryable):
    -836                return f"ARRAY{self.wrap(self.sql(first_arg))}"
    -837
    -838            return inline_array_sql(self, expression)
    +801        def eq_sql(self, expression: exp.EQ) -> str:
    +802            # Operands of = cannot be NULL in BigQuery
    +803            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
    +804                if not isinstance(expression.parent, exp.Update):
    +805                    return "NULL"
    +806
    +807            return self.binary(expression, "=")
    +808
    +809        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    +810            parent = expression.parent
    +811
    +812            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    +813            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    +814            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
    +815                return self.func(
    +816                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
    +817                )
    +818
    +819            return super().attimezone_sql(expression)
    +820
    +821        def trycast_sql(self, expression: exp.TryCast) -> str:
    +822            return self.cast_sql(expression, safe_prefix="SAFE_")
    +823
    +824        def cte_sql(self, expression: exp.CTE) -> str:
    +825            if expression.alias_column_names:
    +826                self.unsupported("Column names in CTE definition are not supported.")
    +827            return super().cte_sql(expression)
    +828
    +829        def array_sql(self, expression: exp.Array) -> str:
    +830            first_arg = seq_get(expression.expressions, 0)
    +831            if isinstance(first_arg, exp.Subqueryable):
    +832                return f"ARRAY{self.wrap(self.sql(first_arg))}"
    +833
    +834            return inline_array_sql(self, expression)
    +835
    +836        def bracket_sql(self, expression: exp.Bracket) -> str:
    +837            this = self.sql(expression, "this")
    +838            expressions = expression.expressions
     839
    -840        def bracket_sql(self, expression: exp.Bracket) -> str:
    -841            this = self.sql(expression, "this")
    -842            expressions = expression.expressions
    -843
    -844            if len(expressions) == 1:
    -845                arg = expressions[0]
    -846                if arg.type is None:
    -847                    from sqlglot.optimizer.annotate_types import annotate_types
    -848
    -849                    arg = annotate_types(arg)
    +840            if len(expressions) == 1:
    +841                arg = expressions[0]
    +842                if arg.type is None:
    +843                    from sqlglot.optimizer.annotate_types import annotate_types
    +844
    +845                    arg = annotate_types(arg)
    +846
    +847                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
    +848                    # BQ doesn't support bracket syntax with string values
    +849                    return f"{this}.{arg.name}"
     850
    -851                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
    -852                    # BQ doesn't support bracket syntax with string values
    -853                    return f"{this}.{arg.name}"
    -854
    -855            expressions_sql = ", ".join(self.sql(e) for e in expressions)
    -856            offset = expression.args.get("offset")
    -857
    -858            if offset == 0:
    -859                expressions_sql = f"OFFSET({expressions_sql})"
    -860            elif offset == 1:
    -861                expressions_sql = f"ORDINAL({expressions_sql})"
    -862            elif offset is not None:
    -863                self.unsupported(f"Unsupported array offset: {offset}")
    -864
    -865            if expression.args.get("safe"):
    -866                expressions_sql = f"SAFE_{expressions_sql}"
    -867
    -868            return f"{this}[{expressions_sql}]"
    -869
    -870        def transaction_sql(self, *_) -> str:
    -871            return "BEGIN TRANSACTION"
    -872
    -873        def commit_sql(self, *_) -> str:
    -874            return "COMMIT TRANSACTION"
    -875
    -876        def rollback_sql(self, *_) -> str:
    -877            return "ROLLBACK TRANSACTION"
    -878
    -879        def in_unnest_op(self, expression: exp.Unnest) -> str:
    -880            return self.sql(expression)
    -881
    -882        def except_op(self, expression: exp.Except) -> str:
    -883            if not expression.args.get("distinct", False):
    -884                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
    -885            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    -886
    -887        def intersect_op(self, expression: exp.Intersect) -> str:
    -888            if not expression.args.get("distinct", False):
    -889                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
    -890            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    -891
    -892        def with_properties(self, properties: exp.Properties) -> str:
    -893            return self.properties(properties, prefix=self.seg("OPTIONS"))
    -894
    -895        def version_sql(self, expression: exp.Version) -> str:
    -896            if expression.name == "TIMESTAMP":
    -897                expression.set("this", "SYSTEM_TIME")
    -898            return super().version_sql(expression)
    +851            expressions_sql = ", ".join(self.sql(e) for e in expressions)
    +852            offset = expression.args.get("offset")
    +853
    +854            if offset == 0:
    +855                expressions_sql = f"OFFSET({expressions_sql})"
    +856            elif offset == 1:
    +857                expressions_sql = f"ORDINAL({expressions_sql})"
    +858            elif offset is not None:
    +859                self.unsupported(f"Unsupported array offset: {offset}")
    +860
    +861            if expression.args.get("safe"):
    +862                expressions_sql = f"SAFE_{expressions_sql}"
    +863
    +864            return f"{this}[{expressions_sql}]"
    +865
    +866        def transaction_sql(self, *_) -> str:
    +867            return "BEGIN TRANSACTION"
    +868
    +869        def commit_sql(self, *_) -> str:
    +870            return "COMMIT TRANSACTION"
    +871
    +872        def rollback_sql(self, *_) -> str:
    +873            return "ROLLBACK TRANSACTION"
    +874
    +875        def in_unnest_op(self, expression: exp.Unnest) -> str:
    +876            return self.sql(expression)
    +877
    +878        def except_op(self, expression: exp.Except) -> str:
    +879            if not expression.args.get("distinct", False):
    +880                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
    +881            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    +882
    +883        def intersect_op(self, expression: exp.Intersect) -> str:
    +884            if not expression.args.get("distinct", False):
    +885                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
    +886            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    +887
    +888        def with_properties(self, properties: exp.Properties) -> str:
    +889            return self.properties(properties, prefix=self.seg("OPTIONS"))
    +890
    +891        def version_sql(self, expression: exp.Version) -> str:
    +892            if expression.name == "TIMESTAMP":
    +893                expression.set("this", "SYSTEM_TIME")
    +894            return super().version_sql(expression)
     
@@ -3780,7 +3754,7 @@ Default: True
@@ -3819,7 +3793,7 @@ Default: True
    RESERVED_KEYWORDS = - {'create', 'asc', 'join', 'is', 'on', 'into', 'qualify', 'lateral', 'over', 'and', 'ignore', 'contains', 'select', 'partition', 'as', 'new', 'for', 'not', 'preceding', 'exists', 'escape', 'respect', 'of', 'default', 'lookup', 'right', 'unbounded', 'false', 'current', 'extract', 'end', 'treat', 'fetch', 'cross', 'from', 'nulls', 'case', 'any', 'desc', 'tablesample', 'using', 'cube', 'outer', 'inner', 'set', 'hash', 'union', 'distinct', 'except', 'some', 'order', 'rollup', 'no', 'proto', 'full', 'if', 'limit', 'unnest', 'recursive', 'having', 'grouping', 'between', 'null', 'by', 'assert_rows_modified', 'collate', 'interval', 'define', 'in', 'range', 'like', 'rows', 'exclude', 'at', 'window', 'merge', 'array', 'cast', 'left', 'when', 'where', 'all', 'or', 'groups', 'struct', 'true', 'else', 'to', 'within', 'enum', 'following', 'then', 'intersect', 'with', 'group', 'natural'} + {'lookup', 'intersect', 'end', 'on', 'cube', 'full', 'limit', 'preceding', 'in', 'assert_rows_modified', 'extract', 'ignore', 'struct', 'tablesample', 'or', 'false', 'of', 'rows', 'when', 'nulls', 'and', 'then', 'within', 'over', 'window', 'using', 'not', 'no', 'left', 'distinct', 'else', 'into', 'as', 'where', 'merge', 'some', 'qualify', 'create', 'exists', 'proto', 'inner', 'union', 'rollup', 'with', 'if', 'any', 'cross', 'for', 'array', 'having', 'default', 'all', 'like', 'contains', 'groups', 'desc', 'asc', 'following', 'right', 'natural', 'respect', 'at', 'case', 'interval', 'null', 'from', 'recursive', 'select', 'grouping', 'current', 'between', 'partition', 'lateral', 'define', 'join', 'escape', 'fetch', 'to', 'except', 'new', 'true', 'group', 'exclude', 'hash', 'is', 'set', 'enum', 'treat', 'outer', 'unbounded', 'order', 'cast', 'collate', 'unnest', 'range', 'by'}
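Note: the rendered set above is what the BigQuery generator consults when deciding whether to backtick-quote an identifier. A minimal usage sketch, assuming the public sqlglot.transpile API and placeholder table/column names; exact output may vary by version:

import sqlglot

# "proto" parses as an ordinary column name in DuckDB, but it appears in BigQuery's
# RESERVED_KEYWORDS, so the BigQuery generator is expected to quote it.
print(sqlglot.transpile("SELECT proto FROM t", read="duckdb", write="bigquery")[0])
# expected roughly: SELECT `proto` FROM t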
    @@ -3839,13 +3813,13 @@ Default: True
    -
    785        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    -786            if isinstance(expression.this, exp.TsOrDsToDate):
    -787                this: exp.Expression = expression.this
    -788            else:
    -789                this = expression
    -790
    -791            return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})"
    +            
    781        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    +782            if isinstance(expression.this, exp.TsOrDsToDate):
    +783                this: exp.Expression = expression.this
    +784            else:
    +785                this = expression
    +786
    +787            return f"FORMAT_DATE({self.format_time(expression)}, {self.sql(this, 'this')})"
     
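Note: a small usage sketch for the timetostr_sql change above, assuming the public sqlglot.transpile API and a placeholder column d; the exact format-token mapping may differ by version:

import sqlglot

# DuckDB's STRFTIME parses to exp.TimeToStr, which the BigQuery generator renders
# as FORMAT_DATE(<format>, <expr>).
print(sqlglot.transpile("SELECT STRFTIME(d, '%Y-%m-%d')", read="duckdb", write="bigquery")[0])
# expected roughly: SELECT FORMAT_DATE('%Y-%m-%d', d)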
    @@ -3863,17 +3837,17 @@ Default: True
    -
    793        def struct_sql(self, expression: exp.Struct) -> str:
    -794            args = []
    -795            for expr in expression.expressions:
    -796                if isinstance(expr, self.KEY_VALUE_DEFINITIONS):
    -797                    arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}"
    -798                else:
    -799                    arg = self.sql(expr)
    -800
    -801                args.append(arg)
    -802
    -803            return self.func("STRUCT", *args)
    +            
    789        def struct_sql(self, expression: exp.Struct) -> str:
    +790            args = []
    +791            for expr in expression.expressions:
    +792                if isinstance(expr, self.KEY_VALUE_DEFINITIONS):
    +793                    arg = f"{self.sql(expr, 'expression')} AS {expr.this.name}"
    +794                else:
    +795                    arg = self.sql(expr)
    +796
    +797                args.append(arg)
    +798
    +799            return self.func("STRUCT", *args)
     
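A hedged sketch of what struct_sql produces, assuming sqlglot.transpile and placeholder field names:

import sqlglot

# Key/value struct expressions are printed as STRUCT(<value> AS <key>, ...).
print(sqlglot.transpile("SELECT STRUCT(1 AS a, 'x' AS b)", read="bigquery", write="bigquery")[0])
# expected roughly: SELECT STRUCT(1 AS a, 'x' AS b)
print(sqlglot.transpile("SELECT {'a': 1, 'b': 'x'}", read="duckdb", write="bigquery")[0])
# expected roughly: SELECT STRUCT(1 AS a, 'x' AS b)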
    @@ -3891,13 +3865,13 @@ Default: True
    -
    805        def eq_sql(self, expression: exp.EQ) -> str:
    -806            # Operands of = cannot be NULL in BigQuery
    -807            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
    -808                if not isinstance(expression.parent, exp.Update):
    -809                    return "NULL"
    -810
    -811            return self.binary(expression, "=")
    +            
    801        def eq_sql(self, expression: exp.EQ) -> str:
    +802            # Operands of = cannot be NULL in BigQuery
    +803            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
    +804                if not isinstance(expression.parent, exp.Update):
    +805                    return "NULL"
    +806
    +807            return self.binary(expression, "=")
     
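An illustration of the eq_sql behaviour above, assuming sqlglot.transpile and placeholder names:

import sqlglot

# "= NULL" can never evaluate to TRUE in BigQuery, so the comparison is folded to
# NULL everywhere except in an UPDATE ... SET assignment.
print(sqlglot.transpile("SELECT a = NULL FROM t", write="bigquery")[0])
# expected roughly: SELECT NULL FROM t
print(sqlglot.transpile("UPDATE t SET a = NULL", write="bigquery")[0])
# expected roughly: UPDATE t SET a = NULL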
    @@ -3915,17 +3889,17 @@ Default: True
    -
    813        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    -814            parent = expression.parent
    -815
    -816            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    -817            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    -818            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
    -819                return self.func(
    -820                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
    -821                )
    -822
    -823            return super().attimezone_sql(expression)
    +            
    809        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    +810            parent = expression.parent
    +811
    +812            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    +813            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    +814            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
    +815                return self.func(
    +816                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
    +817                )
    +818
    +819            return super().attimezone_sql(expression)
     
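A hedged usage sketch for attimezone_sql, assuming sqlglot.transpile and a placeholder column ts:

import sqlglot

# Postgres-style AT TIME ZONE is rewritten to TIMESTAMP(DATETIME(<expr>, <zone>))
# unless it occurs inside CAST(... AS STRING FORMAT ... AT TIME ZONE ...).
print(sqlglot.transpile("SELECT ts AT TIME ZONE 'America/New_York'", read="postgres", write="bigquery")[0])
# expected roughly: SELECT TIMESTAMP(DATETIME(ts, 'America/New_York'))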
    @@ -3943,8 +3917,8 @@ Default: True
    -
    825        def trycast_sql(self, expression: exp.TryCast) -> str:
    -826            return self.cast_sql(expression, safe_prefix="SAFE_")
    +            
    821        def trycast_sql(self, expression: exp.TryCast) -> str:
    +822            return self.cast_sql(expression, safe_prefix="SAFE_")
     
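A brief sketch for trycast_sql, assuming sqlglot.transpile and a placeholder column x:

import sqlglot

# TRY_CAST maps onto BigQuery's SAFE_CAST (NULL instead of an error on failure).
print(sqlglot.transpile("SELECT TRY_CAST(x AS BIGINT)", read="duckdb", write="bigquery")[0])
# expected roughly: SELECT SAFE_CAST(x AS INT64)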
    @@ -3962,10 +3936,10 @@ Default: True
    -
    828        def cte_sql(self, expression: exp.CTE) -> str:
    -829            if expression.alias_column_names:
    -830                self.unsupported("Column names in CTE definition are not supported.")
    -831            return super().cte_sql(expression)
    +            
    824        def cte_sql(self, expression: exp.CTE) -> str:
    +825            if expression.alias_column_names:
    +826                self.unsupported("Column names in CTE definition are not supported.")
    +827            return super().cte_sql(expression)
     
    @@ -3983,12 +3957,12 @@ Default: True
    -
    833        def array_sql(self, expression: exp.Array) -> str:
    -834            first_arg = seq_get(expression.expressions, 0)
    -835            if isinstance(first_arg, exp.Subqueryable):
    -836                return f"ARRAY{self.wrap(self.sql(first_arg))}"
    -837
    -838            return inline_array_sql(self, expression)
    +            
    829        def array_sql(self, expression: exp.Array) -> str:
    +830            first_arg = seq_get(expression.expressions, 0)
    +831            if isinstance(first_arg, exp.Subqueryable):
    +832                return f"ARRAY{self.wrap(self.sql(first_arg))}"
    +833
    +834            return inline_array_sql(self, expression)
     
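A hedged round-trip sketch for array_sql, assuming sqlglot.transpile:

import sqlglot

# ARRAY(<subquery>) keeps the ARRAY(...) wrapper, while plain array literals are
# rendered inline with brackets.
print(sqlglot.transpile("SELECT ARRAY(SELECT 1)", read="bigquery", write="bigquery")[0])
# expected roughly: SELECT ARRAY(SELECT 1)
print(sqlglot.transpile("SELECT [1, 2, 3]", read="bigquery", write="bigquery")[0])
# expected roughly: SELECT [1, 2, 3]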
    @@ -4006,35 +3980,35 @@ Default: True
    -
    840        def bracket_sql(self, expression: exp.Bracket) -> str:
    -841            this = self.sql(expression, "this")
    -842            expressions = expression.expressions
    -843
    -844            if len(expressions) == 1:
    -845                arg = expressions[0]
    -846                if arg.type is None:
    -847                    from sqlglot.optimizer.annotate_types import annotate_types
    -848
    -849                    arg = annotate_types(arg)
    +            
    836        def bracket_sql(self, expression: exp.Bracket) -> str:
    +837            this = self.sql(expression, "this")
    +838            expressions = expression.expressions
    +839
    +840            if len(expressions) == 1:
    +841                arg = expressions[0]
    +842                if arg.type is None:
    +843                    from sqlglot.optimizer.annotate_types import annotate_types
    +844
    +845                    arg = annotate_types(arg)
    +846
    +847                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
    +848                    # BQ doesn't support bracket syntax with string values
    +849                    return f"{this}.{arg.name}"
     850
    -851                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
    -852                    # BQ doesn't support bracket syntax with string values
    -853                    return f"{this}.{arg.name}"
    -854
    -855            expressions_sql = ", ".join(self.sql(e) for e in expressions)
    -856            offset = expression.args.get("offset")
    -857
    -858            if offset == 0:
    -859                expressions_sql = f"OFFSET({expressions_sql})"
    -860            elif offset == 1:
    -861                expressions_sql = f"ORDINAL({expressions_sql})"
    -862            elif offset is not None:
    -863                self.unsupported(f"Unsupported array offset: {offset}")
    -864
    -865            if expression.args.get("safe"):
    -866                expressions_sql = f"SAFE_{expressions_sql}"
    -867
    -868            return f"{this}[{expressions_sql}]"
    +851            expressions_sql = ", ".join(self.sql(e) for e in expressions)
    +852            offset = expression.args.get("offset")
    +853
    +854            if offset == 0:
    +855                expressions_sql = f"OFFSET({expressions_sql})"
    +856            elif offset == 1:
    +857                expressions_sql = f"ORDINAL({expressions_sql})"
    +858            elif offset is not None:
    +859                self.unsupported(f"Unsupported array offset: {offset}")
    +860
    +861            if expression.args.get("safe"):
    +862                expressions_sql = f"SAFE_{expressions_sql}"
    +863
    +864            return f"{this}[{expressions_sql}]"
     
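A hedged round-trip sketch for bracket_sql, assuming sqlglot.transpile and placeholder names:

import sqlglot

# Element access keeps its OFFSET/ORDINAL wrapper, and the "safe" variants keep the
# SAFE_ prefix; string keys would instead be rendered with dot notation.
print(sqlglot.transpile("SELECT arr[OFFSET(0)] FROM t", read="bigquery", write="bigquery")[0])
# expected roughly: SELECT arr[OFFSET(0)] FROM t
print(sqlglot.transpile("SELECT arr[SAFE_ORDINAL(1)] FROM t", read="bigquery", write="bigquery")[0])
# expected roughly: SELECT arr[SAFE_ORDINAL(1)] FROM t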
    @@ -4052,8 +4026,8 @@ Default: True
    -
    870        def transaction_sql(self, *_) -> str:
    -871            return "BEGIN TRANSACTION"
    +            
    866        def transaction_sql(self, *_) -> str:
    +867            return "BEGIN TRANSACTION"
     
    @@ -4071,8 +4045,8 @@ Default: True
    -
    873        def commit_sql(self, *_) -> str:
    -874            return "COMMIT TRANSACTION"
    +            
    869        def commit_sql(self, *_) -> str:
    +870            return "COMMIT TRANSACTION"
     
    @@ -4090,8 +4064,8 @@ Default: True
    -
    876        def rollback_sql(self, *_) -> str:
    -877            return "ROLLBACK TRANSACTION"
    +            
    872        def rollback_sql(self, *_) -> str:
    +873            return "ROLLBACK TRANSACTION"
     
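A small sketch covering the three transaction methods above, assuming sqlglot.transpile:

import sqlglot

# BigQuery spells out the TRANSACTION keyword for transaction-control statements.
for stmt in ("BEGIN", "COMMIT", "ROLLBACK"):
    print(sqlglot.transpile(stmt, write="bigquery")[0])
# expected roughly: BEGIN TRANSACTION, COMMIT TRANSACTION, ROLLBACK TRANSACTION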
    @@ -4109,8 +4083,8 @@ Default: True
    -
    879        def in_unnest_op(self, expression: exp.Unnest) -> str:
    -880            return self.sql(expression)
    +            
    875        def in_unnest_op(self, expression: exp.Unnest) -> str:
    +876            return self.sql(expression)
     
    @@ -4128,10 +4102,10 @@ Default: True
    -
    882        def except_op(self, expression: exp.Except) -> str:
    -883            if not expression.args.get("distinct", False):
    -884                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
    -885            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    +            
    878        def except_op(self, expression: exp.Except) -> str:
    +879            if not expression.args.get("distinct", False):
    +880                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
    +881            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
     
    @@ -4149,10 +4123,10 @@ Default: True
    -
    887        def intersect_op(self, expression: exp.Intersect) -> str:
    -888            if not expression.args.get("distinct", False):
    -889                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
    -890            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
    +            
    883        def intersect_op(self, expression: exp.Intersect) -> str:
    +884            if not expression.args.get("distinct", False):
    +885                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
    +886            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
     
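A hedged sketch covering except_op and intersect_op above, assuming sqlglot.transpile:

import sqlglot

# Bare EXCEPT/INTERSECT default to DISTINCT; the ALL variants trigger an
# "unsupported" warning but are still printed.
print(sqlglot.transpile("SELECT 1 EXCEPT SELECT 2", write="bigquery")[0])
# expected roughly: SELECT 1 EXCEPT DISTINCT SELECT 2
print(sqlglot.transpile("SELECT 1 INTERSECT ALL SELECT 2", write="bigquery")[0])
# expected roughly: SELECT 1 INTERSECT ALL SELECT 2 (plus a warning)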
    @@ -4170,8 +4144,8 @@ Default: True
    -
    892        def with_properties(self, properties: exp.Properties) -> str:
    -893            return self.properties(properties, prefix=self.seg("OPTIONS"))
    +            
    888        def with_properties(self, properties: exp.Properties) -> str:
    +889            return self.properties(properties, prefix=self.seg("OPTIONS"))
     
    @@ -4189,10 +4163,10 @@ Default: True
    -
    895        def version_sql(self, expression: exp.Version) -> str:
    -896            if expression.name == "TIMESTAMP":
    -897                expression.set("this", "SYSTEM_TIME")
    -898            return super().version_sql(expression)
    +            
    891        def version_sql(self, expression: exp.Version) -> str:
    +892            if expression.name == "TIMESTAMP":
    +893                expression.set("this", "SYSTEM_TIME")
    +894            return super().version_sql(expression)
     
diff --git a/docs/sqlglot/dialects/clickhouse.html b/docs/sqlglot/dialects/clickhouse.html
index b099c0d..8ecf137 100644
--- a/docs/sqlglot/dialects/clickhouse.html
+++ b/docs/sqlglot/dialects/clickhouse.html
@@ -2762,7 +2762,7 @@ Default: 3
    AGG_FUNCTIONS = - {'groupArraySample', 'cramersVBiasCorrected', 'argMax', 'quantileBFloat16', 'quantileTimingWeighted', 'stochasticLogisticRegression', 'stddevPop', 'maxIntersections', 'sumWithOverflow', 'sumMap', 'max', 'sumCount', 'minMap', 'quantiles', 'meanZTest', 'uniqCombined', 'quantileExact', 'avg', 'quantilesExactLow', 'quantileTiming', 'quantileGK', 'count', 'covarPop', 'groupBitmap', 'first_value', 'quantileExactWeighted', 'intervalLengthSum', 'uniq', 'quantilesExactWeighted', 'quantilesTDigestWeighted', 'topKWeighted', 'quantilesTDigest', 'groupBitXor', 'quantilesGK', 'skewPop', 'mannWhitneyUTest', 'quantileTDigestWeighted', 'quantilesDeterministic', 'median', 'largestTriangleThreeBuckets', 'rankCorr', 'maxIntersectionsPosition', 'varSamp', 'last_value', 'welchTTest', 'cramersV', 'quantileDeterministic', 'uniqCombined64', 'uniqTheta', 'groupArrayInsertAt', 'uniqExact', 'covarSamp', 'quantilesInterpolatedWeighted', 'groupBitmapOr', 'groupBitAnd', 'sumKahan', 'quantilesTimingWeighted', 'simpleLinearRegression', 'groupUniqArray', 'kolmogorovSmirnovTest', 'min', 'argMin', 'corr', 'any', 'quantileInterpolatedWeighted', 'groupArray', 'groupArrayMovingSum', 'groupArrayLast', 'quantileBFloat16Weighted', 'skewSamp', 'stddevSamp', 'kurtSamp', 'uniqHLL12', 'boundingRatio', 'quantileTDigest', 'theilsU', 'sum', 'quantile', 'quantilesBFloat16Weighted', 'quantilesExact', 'stochasticLinearRegression', 'groupBitOr', 'varPop', 'topK', 'kurtPop', 'anyLast', 'groupArrayMovingAvg', 'quantilesBFloat16', 'entropy', 'quantileExactHigh', 'sparkBar', 'studentTTest', 'deltaSumTimestamp', 'quantilesExactHigh', 'quantilesTiming', 'groupBitmapXor', 'exponentialMovingAverage', 'groupBitmapAnd', 'contingency', 'categoricalInformationValue', 'maxMap', 'quantileExactLow', 'avgWeighted', 'anyHeavy', 'deltaSum'} + {'uniqCombined', 'contingency', 'stddevPop', 'uniqTheta', 'quantileExactWeighted', 'simpleLinearRegression', 'quantilesExact', 'theilsU', 'quantile', 'quantilesDeterministic', 'groupBitmapXor', 'quantileExactLow', 'maxMap', 'deltaSum', 'groupBitmapOr', 'stochasticLogisticRegression', 'intervalLengthSum', 'groupArraySample', 'covarSamp', 'groupBitXor', 'quantilesInterpolatedWeighted', 'first_value', 'quantilesGK', 'deltaSumTimestamp', 'maxIntersectionsPosition', 'groupArrayMovingSum', 'varSamp', 'groupBitOr', 'argMin', 'groupBitmapAnd', 'rankCorr', 'welchTTest', 'skewPop', 'avg', 'uniqHLL12', 'quantileGK', 'mannWhitneyUTest', 'largestTriangleThreeBuckets', 'quantileBFloat16', 'quantileTDigest', 'minMap', 'quantilesTDigestWeighted', 'uniqExact', 'quantilesTimingWeighted', 'topK', 'quantilesExactHigh', 'topKWeighted', 'categoricalInformationValue', 'entropy', 'uniqCombined64', 'quantileExactHigh', 'kolmogorovSmirnovTest', 'min', 'groupArray', 'quantilesBFloat16', 'boundingRatio', 'corr', 'cramersV', 'cramersVBiasCorrected', 'quantilesTDigest', 'stochasticLinearRegression', 'groupUniqArray', 'sparkBar', 'groupArrayLast', 'groupArrayMovingAvg', 'meanZTest', 'sumKahan', 'kurtSamp', 'uniq', 'groupBitmap', 'sumMap', 'covarPop', 'quantiles', 'quantileTDigestWeighted', 'argMax', 'studentTTest', 'kurtPop', 'stddevSamp', 'skewSamp', 'last_value', 'quantileBFloat16Weighted', 'sumCount', 'quantilesTiming', 'sum', 'median', 'any', 'quantileDeterministic', 'groupArrayInsertAt', 'maxIntersections', 'quantileExact', 'quantileTiming', 'count', 'quantileTimingWeighted', 'anyHeavy', 'varPop', 'quantileInterpolatedWeighted', 'max', 'avgWeighted', 'groupBitAnd', 'quantilesExactWeighted', 'sumWithOverflow', 'anyLast', 
'quantilesBFloat16Weighted', 'quantilesExactLow', 'exponentialMovingAverage'}
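Note: the AGG_FUNC_MAPPING attribute diffed in the next hunk pairs each base aggregate above with a ClickHouse combinator suffix. A hypothetical, simplified reconstruction of its shape; base_functions and combinators below are illustrative subsets, not the dialect's actual code:

import itertools

# Cross every base aggregate with the combinator suffixes and key the result by the
# combined name; an empty suffix maps a plain name to itself. The real table also
# special-cases names such as sumMap/minMap/maxMap.
base_functions = ["count", "sum", "quantile", "uniq"]
combinators = ["If", "Array", "ArrayIf", "Map", "SimpleState", "State", "Merge", "MergeState"]
mapping = {f"{name}{suffix}": (name, suffix) for name, suffix in itertools.product(base_functions, combinators)}
mapping.update({name: (name, "") for name in base_functions})
print(mapping["countIf"])        # ('count', 'If')
print(mapping["quantileState"])  # ('quantile', 'State')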
    @@ -2788,7 +2788,7 @@ Default: 3
    AGG_FUNC_MAPPING = - {'groupArraySampleIf': ('groupArraySample', 'If'), 'cramersVBiasCorrectedIf': ('cramersVBiasCorrected', 'If'), 'argMaxIf': ('argMax', 'If'), 'quantileBFloat16If': ('quantileBFloat16', 'If'), 'quantileTimingWeightedIf': ('quantileTimingWeighted', 'If'), 'stochasticLogisticRegressionIf': ('stochasticLogisticRegression', 'If'), 'stddevPopIf': ('stddevPop', 'If'), 'maxIntersectionsIf': ('maxIntersections', 'If'), 'sumWithOverflowIf': ('sumWithOverflow', 'If'), 'sumMapIf': ('sumMap', 'If'), 'maxIf': ('max', 'If'), 'sumCountIf': ('sumCount', 'If'), 'minMapIf': ('minMap', 'If'), 'quantilesIf': ('quantiles', 'If'), 'meanZTestIf': ('meanZTest', 'If'), 'uniqCombinedIf': ('uniqCombined', 'If'), 'quantileExactIf': ('quantileExact', 'If'), 'avgIf': ('avg', 'If'), 'quantilesExactLowIf': ('quantilesExactLow', 'If'), 'quantileTimingIf': ('quantileTiming', 'If'), 'quantileGKIf': ('quantileGK', 'If'), 'countIf': ('count', 'If'), 'covarPopIf': ('covarPop', 'If'), 'groupBitmapIf': ('groupBitmap', 'If'), 'first_valueIf': ('first_value', 'If'), 'quantileExactWeightedIf': ('quantileExactWeighted', 'If'), 'intervalLengthSumIf': ('intervalLengthSum', 'If'), 'uniqIf': ('uniq', 'If'), 'quantilesExactWeightedIf': ('quantilesExactWeighted', 'If'), 'quantilesTDigestWeightedIf': ('quantilesTDigestWeighted', 'If'), 'topKWeightedIf': ('topKWeighted', 'If'), 'quantilesTDigestIf': ('quantilesTDigest', 'If'), 'groupBitXorIf': ('groupBitXor', 'If'), 'quantilesGKIf': ('quantilesGK', 'If'), 'skewPopIf': ('skewPop', 'If'), 'mannWhitneyUTestIf': ('mannWhitneyUTest', 'If'), 'quantileTDigestWeightedIf': ('quantileTDigestWeighted', 'If'), 'quantilesDeterministicIf': ('quantilesDeterministic', 'If'), 'medianIf': ('median', 'If'), 'largestTriangleThreeBucketsIf': ('largestTriangleThreeBuckets', 'If'), 'rankCorrIf': ('rankCorr', 'If'), 'maxIntersectionsPositionIf': ('maxIntersectionsPosition', 'If'), 'varSampIf': ('varSamp', 'If'), 'last_valueIf': ('last_value', 'If'), 'welchTTestIf': ('welchTTest', 'If'), 'cramersVIf': ('cramersV', 'If'), 'quantileDeterministicIf': ('quantileDeterministic', 'If'), 'uniqCombined64If': ('uniqCombined64', 'If'), 'uniqThetaIf': ('uniqTheta', 'If'), 'groupArrayInsertAtIf': ('groupArrayInsertAt', 'If'), 'uniqExactIf': ('uniqExact', 'If'), 'covarSampIf': ('covarSamp', 'If'), 'quantilesInterpolatedWeightedIf': ('quantilesInterpolatedWeighted', 'If'), 'groupBitmapOrIf': ('groupBitmapOr', 'If'), 'groupBitAndIf': ('groupBitAnd', 'If'), 'sumKahanIf': ('sumKahan', 'If'), 'quantilesTimingWeightedIf': ('quantilesTimingWeighted', 'If'), 'simpleLinearRegressionIf': ('simpleLinearRegression', 'If'), 'groupUniqArrayIf': ('groupUniqArray', 'If'), 'kolmogorovSmirnovTestIf': ('kolmogorovSmirnovTest', 'If'), 'minIf': ('min', 'If'), 'argMinIf': ('argMin', 'If'), 'corrIf': ('corr', 'If'), 'anyIf': ('any', 'If'), 'quantileInterpolatedWeightedIf': ('quantileInterpolatedWeighted', 'If'), 'groupArrayIf': ('groupArray', 'If'), 'groupArrayMovingSumIf': ('groupArrayMovingSum', 'If'), 'groupArrayLastIf': ('groupArrayLast', 'If'), 'quantileBFloat16WeightedIf': ('quantileBFloat16Weighted', 'If'), 'skewSampIf': ('skewSamp', 'If'), 'stddevSampIf': ('stddevSamp', 'If'), 'kurtSampIf': ('kurtSamp', 'If'), 'uniqHLL12If': ('uniqHLL12', 'If'), 'boundingRatioIf': ('boundingRatio', 'If'), 'quantileTDigestIf': ('quantileTDigest', 'If'), 'theilsUIf': ('theilsU', 'If'), 'sumIf': ('sum', 'If'), 'quantileIf': ('quantile', 'If'), 'quantilesBFloat16WeightedIf': ('quantilesBFloat16Weighted', 'If'), 'quantilesExactIf': 
('quantilesExact', 'If'), 'stochasticLinearRegressionIf': ('stochasticLinearRegression', 'If'), 'groupBitOrIf': ('groupBitOr', 'If'), 'varPopIf': ('varPop', 'If'), 'topKIf': ('topK', 'If'), 'kurtPopIf': ('kurtPop', 'If'), 'anyLastIf': ('anyLast', 'If'), 'groupArrayMovingAvgIf': ('groupArrayMovingAvg', 'If'), 'quantilesBFloat16If': ('quantilesBFloat16', 'If'), 'entropyIf': ('entropy', 'If'), 'quantileExactHighIf': ('quantileExactHigh', 'If'), 'sparkBarIf': ('sparkBar', 'If'), 'studentTTestIf': ('studentTTest', 'If'), 'deltaSumTimestampIf': ('deltaSumTimestamp', 'If'), 'quantilesExactHighIf': ('quantilesExactHigh', 'If'), 'quantilesTimingIf': ('quantilesTiming', 'If'), 'groupBitmapXorIf': ('groupBitmapXor', 'If'), 'exponentialMovingAverageIf': ('exponentialMovingAverage', 'If'), 'groupBitmapAndIf': ('groupBitmapAnd', 'If'), 'contingencyIf': ('contingency', 'If'), 'categoricalInformationValueIf': ('categoricalInformationValue', 'If'), 'maxMapIf': ('maxMap', 'If'), 'quantileExactLowIf': ('quantileExactLow', 'If'), 'avgWeightedIf': ('avgWeighted', 'If'), 'anyHeavyIf': ('anyHeavy', 'If'), 'deltaSumIf': ('deltaSum', 'If'), 'groupArraySampleArray': ('groupArraySample', 'Array'), 'cramersVBiasCorrectedArray': ('cramersVBiasCorrected', 'Array'), 'argMaxArray': ('argMax', 'Array'), 'quantileBFloat16Array': ('quantileBFloat16', 'Array'), 'quantileTimingWeightedArray': ('quantileTimingWeighted', 'Array'), 'stochasticLogisticRegressionArray': ('stochasticLogisticRegression', 'Array'), 'stddevPopArray': ('stddevPop', 'Array'), 'maxIntersectionsArray': ('maxIntersections', 'Array'), 'sumWithOverflowArray': ('sumWithOverflow', 'Array'), 'sumMapArray': ('sumMap', 'Array'), 'maxArray': ('max', 'Array'), 'sumCountArray': ('sumCount', 'Array'), 'minMapArray': ('minMap', 'Array'), 'quantilesArray': ('quantiles', 'Array'), 'meanZTestArray': ('meanZTest', 'Array'), 'uniqCombinedArray': ('uniqCombined', 'Array'), 'quantileExactArray': ('quantileExact', 'Array'), 'avgArray': ('avg', 'Array'), 'quantilesExactLowArray': ('quantilesExactLow', 'Array'), 'quantileTimingArray': ('quantileTiming', 'Array'), 'quantileGKArray': ('quantileGK', 'Array'), 'countArray': ('count', 'Array'), 'covarPopArray': ('covarPop', 'Array'), 'groupBitmapArray': ('groupBitmap', 'Array'), 'first_valueArray': ('first_value', 'Array'), 'quantileExactWeightedArray': ('quantileExactWeighted', 'Array'), 'intervalLengthSumArray': ('intervalLengthSum', 'Array'), 'uniqArray': ('uniq', 'Array'), 'quantilesExactWeightedArray': ('quantilesExactWeighted', 'Array'), 'quantilesTDigestWeightedArray': ('quantilesTDigestWeighted', 'Array'), 'topKWeightedArray': ('topKWeighted', 'Array'), 'quantilesTDigestArray': ('quantilesTDigest', 'Array'), 'groupBitXorArray': ('groupBitXor', 'Array'), 'quantilesGKArray': ('quantilesGK', 'Array'), 'skewPopArray': ('skewPop', 'Array'), 'mannWhitneyUTestArray': ('mannWhitneyUTest', 'Array'), 'quantileTDigestWeightedArray': ('quantileTDigestWeighted', 'Array'), 'quantilesDeterministicArray': ('quantilesDeterministic', 'Array'), 'medianArray': ('median', 'Array'), 'largestTriangleThreeBucketsArray': ('largestTriangleThreeBuckets', 'Array'), 'rankCorrArray': ('rankCorr', 'Array'), 'maxIntersectionsPositionArray': ('maxIntersectionsPosition', 'Array'), 'varSampArray': ('varSamp', 'Array'), 'last_valueArray': ('last_value', 'Array'), 'welchTTestArray': ('welchTTest', 'Array'), 'cramersVArray': ('cramersV', 'Array'), 'quantileDeterministicArray': ('quantileDeterministic', 'Array'), 'uniqCombined64Array': ('uniqCombined64', 
'Array'), 'uniqThetaArray': ('uniqTheta', 'Array'), 'groupArrayInsertAtArray': ('groupArrayInsertAt', 'Array'), 'uniqExactArray': ('uniqExact', 'Array'), 'covarSampArray': ('covarSamp', 'Array'), 'quantilesInterpolatedWeightedArray': ('quantilesInterpolatedWeighted', 'Array'), 'groupBitmapOrArray': ('groupBitmapOr', 'Array'), 'groupBitAndArray': ('groupBitAnd', 'Array'), 'sumKahanArray': ('sumKahan', 'Array'), 'quantilesTimingWeightedArray': ('quantilesTimingWeighted', 'Array'), 'simpleLinearRegressionArray': ('simpleLinearRegression', 'Array'), 'groupUniqArrayArray': ('groupUniqArray', 'Array'), 'kolmogorovSmirnovTestArray': ('kolmogorovSmirnovTest', 'Array'), 'minArray': ('min', 'Array'), 'argMinArray': ('argMin', 'Array'), 'corrArray': ('corr', 'Array'), 'anyArray': ('any', 'Array'), 'quantileInterpolatedWeightedArray': ('quantileInterpolatedWeighted', 'Array'), 'groupArrayArray': ('groupArray', 'Array'), 'groupArrayMovingSumArray': ('groupArrayMovingSum', 'Array'), 'groupArrayLastArray': ('groupArrayLast', 'Array'), 'quantileBFloat16WeightedArray': ('quantileBFloat16Weighted', 'Array'), 'skewSampArray': ('skewSamp', 'Array'), 'stddevSampArray': ('stddevSamp', 'Array'), 'kurtSampArray': ('kurtSamp', 'Array'), 'uniqHLL12Array': ('uniqHLL12', 'Array'), 'boundingRatioArray': ('boundingRatio', 'Array'), 'quantileTDigestArray': ('quantileTDigest', 'Array'), 'theilsUArray': ('theilsU', 'Array'), 'sumArray': ('sum', 'Array'), 'quantileArray': ('quantile', 'Array'), 'quantilesBFloat16WeightedArray': ('quantilesBFloat16Weighted', 'Array'), 'quantilesExactArray': ('quantilesExact', 'Array'), 'stochasticLinearRegressionArray': ('stochasticLinearRegression', 'Array'), 'groupBitOrArray': ('groupBitOr', 'Array'), 'varPopArray': ('varPop', 'Array'), 'topKArray': ('topK', 'Array'), 'kurtPopArray': ('kurtPop', 'Array'), 'anyLastArray': ('anyLast', 'Array'), 'groupArrayMovingAvgArray': ('groupArrayMovingAvg', 'Array'), 'quantilesBFloat16Array': ('quantilesBFloat16', 'Array'), 'entropyArray': ('entropy', 'Array'), 'quantileExactHighArray': ('quantileExactHigh', 'Array'), 'sparkBarArray': ('sparkBar', 'Array'), 'studentTTestArray': ('studentTTest', 'Array'), 'deltaSumTimestampArray': ('deltaSumTimestamp', 'Array'), 'quantilesExactHighArray': ('quantilesExactHigh', 'Array'), 'quantilesTimingArray': ('quantilesTiming', 'Array'), 'groupBitmapXorArray': ('groupBitmapXor', 'Array'), 'exponentialMovingAverageArray': ('exponentialMovingAverage', 'Array'), 'groupBitmapAndArray': ('groupBitmapAnd', 'Array'), 'contingencyArray': ('contingency', 'Array'), 'categoricalInformationValueArray': ('categoricalInformationValue', 'Array'), 'maxMapArray': ('maxMap', 'Array'), 'quantileExactLowArray': ('quantileExactLow', 'Array'), 'avgWeightedArray': ('avgWeighted', 'Array'), 'anyHeavyArray': ('anyHeavy', 'Array'), 'deltaSumArray': ('deltaSum', 'Array'), 'groupArraySampleArrayIf': ('groupArraySample', 'ArrayIf'), 'cramersVBiasCorrectedArrayIf': ('cramersVBiasCorrected', 'ArrayIf'), 'argMaxArrayIf': ('argMax', 'ArrayIf'), 'quantileBFloat16ArrayIf': ('quantileBFloat16', 'ArrayIf'), 'quantileTimingWeightedArrayIf': ('quantileTimingWeighted', 'ArrayIf'), 'stochasticLogisticRegressionArrayIf': ('stochasticLogisticRegression', 'ArrayIf'), 'stddevPopArrayIf': ('stddevPop', 'ArrayIf'), 'maxIntersectionsArrayIf': ('maxIntersections', 'ArrayIf'), 'sumWithOverflowArrayIf': ('sumWithOverflow', 'ArrayIf'), 'sumMapArrayIf': ('sumMap', 'ArrayIf'), 'maxArrayIf': ('max', 'ArrayIf'), 'sumCountArrayIf': ('sumCount', 'ArrayIf'), 'minMapArrayIf': 
('minMap', 'ArrayIf'), 'quantilesArrayIf': ('quantiles', 'ArrayIf'), 'meanZTestArrayIf': ('meanZTest', 'ArrayIf'), 'uniqCombinedArrayIf': ('uniqCombined', 'ArrayIf'), 'quantileExactArrayIf': ('quantileExact', 'ArrayIf'), 'avgArrayIf': ('avg', 'ArrayIf'), 'quantilesExactLowArrayIf': ('quantilesExactLow', 'ArrayIf'), 'quantileTimingArrayIf': ('quantileTiming', 'ArrayIf'), 'quantileGKArrayIf': ('quantileGK', 'ArrayIf'), 'countArrayIf': ('count', 'ArrayIf'), 'covarPopArrayIf': ('covarPop', 'ArrayIf'), 'groupBitmapArrayIf': ('groupBitmap', 'ArrayIf'), 'first_valueArrayIf': ('first_value', 'ArrayIf'), 'quantileExactWeightedArrayIf': ('quantileExactWeighted', 'ArrayIf'), 'intervalLengthSumArrayIf': ('intervalLengthSum', 'ArrayIf'), 'uniqArrayIf': ('uniq', 'ArrayIf'), 'quantilesExactWeightedArrayIf': ('quantilesExactWeighted', 'ArrayIf'), 'quantilesTDigestWeightedArrayIf': ('quantilesTDigestWeighted', 'ArrayIf'), 'topKWeightedArrayIf': ('topKWeighted', 'ArrayIf'), 'quantilesTDigestArrayIf': ('quantilesTDigest', 'ArrayIf'), 'groupBitXorArrayIf': ('groupBitXor', 'ArrayIf'), 'quantilesGKArrayIf': ('quantilesGK', 'ArrayIf'), 'skewPopArrayIf': ('skewPop', 'ArrayIf'), 'mannWhitneyUTestArrayIf': ('mannWhitneyUTest', 'ArrayIf'), 'quantileTDigestWeightedArrayIf': ('quantileTDigestWeighted', 'ArrayIf'), 'quantilesDeterministicArrayIf': ('quantilesDeterministic', 'ArrayIf'), 'medianArrayIf': ('median', 'ArrayIf'), 'largestTriangleThreeBucketsArrayIf': ('largestTriangleThreeBuckets', 'ArrayIf'), 'rankCorrArrayIf': ('rankCorr', 'ArrayIf'), 'maxIntersectionsPositionArrayIf': ('maxIntersectionsPosition', 'ArrayIf'), 'varSampArrayIf': ('varSamp', 'ArrayIf'), 'last_valueArrayIf': ('last_value', 'ArrayIf'), 'welchTTestArrayIf': ('welchTTest', 'ArrayIf'), 'cramersVArrayIf': ('cramersV', 'ArrayIf'), 'quantileDeterministicArrayIf': ('quantileDeterministic', 'ArrayIf'), 'uniqCombined64ArrayIf': ('uniqCombined64', 'ArrayIf'), 'uniqThetaArrayIf': ('uniqTheta', 'ArrayIf'), 'groupArrayInsertAtArrayIf': ('groupArrayInsertAt', 'ArrayIf'), 'uniqExactArrayIf': ('uniqExact', 'ArrayIf'), 'covarSampArrayIf': ('covarSamp', 'ArrayIf'), 'quantilesInterpolatedWeightedArrayIf': ('quantilesInterpolatedWeighted', 'ArrayIf'), 'groupBitmapOrArrayIf': ('groupBitmapOr', 'ArrayIf'), 'groupBitAndArrayIf': ('groupBitAnd', 'ArrayIf'), 'sumKahanArrayIf': ('sumKahan', 'ArrayIf'), 'quantilesTimingWeightedArrayIf': ('quantilesTimingWeighted', 'ArrayIf'), 'simpleLinearRegressionArrayIf': ('simpleLinearRegression', 'ArrayIf'), 'groupUniqArrayArrayIf': ('groupUniqArray', 'ArrayIf'), 'kolmogorovSmirnovTestArrayIf': ('kolmogorovSmirnovTest', 'ArrayIf'), 'minArrayIf': ('min', 'ArrayIf'), 'argMinArrayIf': ('argMin', 'ArrayIf'), 'corrArrayIf': ('corr', 'ArrayIf'), 'anyArrayIf': ('any', 'ArrayIf'), 'quantileInterpolatedWeightedArrayIf': ('quantileInterpolatedWeighted', 'ArrayIf'), 'groupArrayArrayIf': ('groupArray', 'ArrayIf'), 'groupArrayMovingSumArrayIf': ('groupArrayMovingSum', 'ArrayIf'), 'groupArrayLastArrayIf': ('groupArrayLast', 'ArrayIf'), 'quantileBFloat16WeightedArrayIf': ('quantileBFloat16Weighted', 'ArrayIf'), 'skewSampArrayIf': ('skewSamp', 'ArrayIf'), 'stddevSampArrayIf': ('stddevSamp', 'ArrayIf'), 'kurtSampArrayIf': ('kurtSamp', 'ArrayIf'), 'uniqHLL12ArrayIf': ('uniqHLL12', 'ArrayIf'), 'boundingRatioArrayIf': ('boundingRatio', 'ArrayIf'), 'quantileTDigestArrayIf': ('quantileTDigest', 'ArrayIf'), 'theilsUArrayIf': ('theilsU', 'ArrayIf'), 'sumArrayIf': ('sum', 'ArrayIf'), 'quantileArrayIf': ('quantile', 'ArrayIf'), 
'quantilesBFloat16WeightedArrayIf': ('quantilesBFloat16Weighted', 'ArrayIf'), 'quantilesExactArrayIf': ('quantilesExact', 'ArrayIf'), 'stochasticLinearRegressionArrayIf': ('stochasticLinearRegression', 'ArrayIf'), 'groupBitOrArrayIf': ('groupBitOr', 'ArrayIf'), 'varPopArrayIf': ('varPop', 'ArrayIf'), 'topKArrayIf': ('topK', 'ArrayIf'), 'kurtPopArrayIf': ('kurtPop', 'ArrayIf'), 'anyLastArrayIf': ('anyLast', 'ArrayIf'), 'groupArrayMovingAvgArrayIf': ('groupArrayMovingAvg', 'ArrayIf'), 'quantilesBFloat16ArrayIf': ('quantilesBFloat16', 'ArrayIf'), 'entropyArrayIf': ('entropy', 'ArrayIf'), 'quantileExactHighArrayIf': ('quantileExactHigh', 'ArrayIf'), 'sparkBarArrayIf': ('sparkBar', 'ArrayIf'), 'studentTTestArrayIf': ('studentTTest', 'ArrayIf'), 'deltaSumTimestampArrayIf': ('deltaSumTimestamp', 'ArrayIf'), 'quantilesExactHighArrayIf': ('quantilesExactHigh', 'ArrayIf'), 'quantilesTimingArrayIf': ('quantilesTiming', 'ArrayIf'), 'groupBitmapXorArrayIf': ('groupBitmapXor', 'ArrayIf'), 'exponentialMovingAverageArrayIf': ('exponentialMovingAverage', 'ArrayIf'), 'groupBitmapAndArrayIf': ('groupBitmapAnd', 'ArrayIf'), 'contingencyArrayIf': ('contingency', 'ArrayIf'), 'categoricalInformationValueArrayIf': ('categoricalInformationValue', 'ArrayIf'), 'maxMapArrayIf': ('maxMap', 'ArrayIf'), 'quantileExactLowArrayIf': ('quantileExactLow', 'ArrayIf'), 'avgWeightedArrayIf': ('avgWeighted', 'ArrayIf'), 'anyHeavyArrayIf': ('anyHeavy', 'ArrayIf'), 'deltaSumArrayIf': ('deltaSum', 'ArrayIf'), 'groupArraySampleMap': ('groupArraySample', 'Map'), 'cramersVBiasCorrectedMap': ('cramersVBiasCorrected', 'Map'), 'argMaxMap': ('argMax', 'Map'), 'quantileBFloat16Map': ('quantileBFloat16', 'Map'), 'quantileTimingWeightedMap': ('quantileTimingWeighted', 'Map'), 'stochasticLogisticRegressionMap': ('stochasticLogisticRegression', 'Map'), 'stddevPopMap': ('stddevPop', 'Map'), 'maxIntersectionsMap': ('maxIntersections', 'Map'), 'sumWithOverflowMap': ('sumWithOverflow', 'Map'), 'sumMapMap': ('sumMap', 'Map'), 'maxMap': ('maxMap', ''), 'sumCountMap': ('sumCount', 'Map'), 'minMapMap': ('minMap', 'Map'), 'quantilesMap': ('quantiles', 'Map'), 'meanZTestMap': ('meanZTest', 'Map'), 'uniqCombinedMap': ('uniqCombined', 'Map'), 'quantileExactMap': ('quantileExact', 'Map'), 'avgMap': ('avg', 'Map'), 'quantilesExactLowMap': ('quantilesExactLow', 'Map'), 'quantileTimingMap': ('quantileTiming', 'Map'), 'quantileGKMap': ('quantileGK', 'Map'), 'countMap': ('count', 'Map'), 'covarPopMap': ('covarPop', 'Map'), 'groupBitmapMap': ('groupBitmap', 'Map'), 'first_valueMap': ('first_value', 'Map'), 'quantileExactWeightedMap': ('quantileExactWeighted', 'Map'), 'intervalLengthSumMap': ('intervalLengthSum', 'Map'), 'uniqMap': ('uniq', 'Map'), 'quantilesExactWeightedMap': ('quantilesExactWeighted', 'Map'), 'quantilesTDigestWeightedMap': ('quantilesTDigestWeighted', 'Map'), 'topKWeightedMap': ('topKWeighted', 'Map'), 'quantilesTDigestMap': ('quantilesTDigest', 'Map'), 'groupBitXorMap': ('groupBitXor', 'Map'), 'quantilesGKMap': ('quantilesGK', 'Map'), 'skewPopMap': ('skewPop', 'Map'), 'mannWhitneyUTestMap': ('mannWhitneyUTest', 'Map'), 'quantileTDigestWeightedMap': ('quantileTDigestWeighted', 'Map'), 'quantilesDeterministicMap': ('quantilesDeterministic', 'Map'), 'medianMap': ('median', 'Map'), 'largestTriangleThreeBucketsMap': ('largestTriangleThreeBuckets', 'Map'), 'rankCorrMap': ('rankCorr', 'Map'), 'maxIntersectionsPositionMap': ('maxIntersectionsPosition', 'Map'), 'varSampMap': ('varSamp', 'Map'), 'last_valueMap': ('last_value', 'Map'), 'welchTTestMap': 
('welchTTest', 'Map'), 'cramersVMap': ('cramersV', 'Map'), 'quantileDeterministicMap': ('quantileDeterministic', 'Map'), 'uniqCombined64Map': ('uniqCombined64', 'Map'), 'uniqThetaMap': ('uniqTheta', 'Map'), 'groupArrayInsertAtMap': ('groupArrayInsertAt', 'Map'), 'uniqExactMap': ('uniqExact', 'Map'), 'covarSampMap': ('covarSamp', 'Map'), 'quantilesInterpolatedWeightedMap': ('quantilesInterpolatedWeighted', 'Map'), 'groupBitmapOrMap': ('groupBitmapOr', 'Map'), 'groupBitAndMap': ('groupBitAnd', 'Map'), 'sumKahanMap': ('sumKahan', 'Map'), 'quantilesTimingWeightedMap': ('quantilesTimingWeighted', 'Map'), 'simpleLinearRegressionMap': ('simpleLinearRegression', 'Map'), 'groupUniqArrayMap': ('groupUniqArray', 'Map'), 'kolmogorovSmirnovTestMap': ('kolmogorovSmirnovTest', 'Map'), 'minMap': ('minMap', ''), 'argMinMap': ('argMin', 'Map'), 'corrMap': ('corr', 'Map'), 'anyMap': ('any', 'Map'), 'quantileInterpolatedWeightedMap': ('quantileInterpolatedWeighted', 'Map'), 'groupArrayMap': ('groupArray', 'Map'), 'groupArrayMovingSumMap': ('groupArrayMovingSum', 'Map'), 'groupArrayLastMap': ('groupArrayLast', 'Map'), 'quantileBFloat16WeightedMap': ('quantileBFloat16Weighted', 'Map'), 'skewSampMap': ('skewSamp', 'Map'), 'stddevSampMap': ('stddevSamp', 'Map'), 'kurtSampMap': ('kurtSamp', 'Map'), 'uniqHLL12Map': ('uniqHLL12', 'Map'), 'boundingRatioMap': ('boundingRatio', 'Map'), 'quantileTDigestMap': ('quantileTDigest', 'Map'), 'theilsUMap': ('theilsU', 'Map'), 'sumMap': ('sumMap', ''), 'quantileMap': ('quantile', 'Map'), 'quantilesBFloat16WeightedMap': ('quantilesBFloat16Weighted', 'Map'), 'quantilesExactMap': ('quantilesExact', 'Map'), 'stochasticLinearRegressionMap': ('stochasticLinearRegression', 'Map'), 'groupBitOrMap': ('groupBitOr', 'Map'), 'varPopMap': ('varPop', 'Map'), 'topKMap': ('topK', 'Map'), 'kurtPopMap': ('kurtPop', 'Map'), 'anyLastMap': ('anyLast', 'Map'), 'groupArrayMovingAvgMap': ('groupArrayMovingAvg', 'Map'), 'quantilesBFloat16Map': ('quantilesBFloat16', 'Map'), 'entropyMap': ('entropy', 'Map'), 'quantileExactHighMap': ('quantileExactHigh', 'Map'), 'sparkBarMap': ('sparkBar', 'Map'), 'studentTTestMap': ('studentTTest', 'Map'), 'deltaSumTimestampMap': ('deltaSumTimestamp', 'Map'), 'quantilesExactHighMap': ('quantilesExactHigh', 'Map'), 'quantilesTimingMap': ('quantilesTiming', 'Map'), 'groupBitmapXorMap': ('groupBitmapXor', 'Map'), 'exponentialMovingAverageMap': ('exponentialMovingAverage', 'Map'), 'groupBitmapAndMap': ('groupBitmapAnd', 'Map'), 'contingencyMap': ('contingency', 'Map'), 'categoricalInformationValueMap': ('categoricalInformationValue', 'Map'), 'maxMapMap': ('maxMap', 'Map'), 'quantileExactLowMap': ('quantileExactLow', 'Map'), 'avgWeightedMap': ('avgWeighted', 'Map'), 'anyHeavyMap': ('anyHeavy', 'Map'), 'deltaSumMap': ('deltaSum', 'Map'), 'groupArraySampleSimpleState': ('groupArraySample', 'SimpleState'), 'cramersVBiasCorrectedSimpleState': ('cramersVBiasCorrected', 'SimpleState'), 'argMaxSimpleState': ('argMax', 'SimpleState'), 'quantileBFloat16SimpleState': ('quantileBFloat16', 'SimpleState'), 'quantileTimingWeightedSimpleState': ('quantileTimingWeighted', 'SimpleState'), 'stochasticLogisticRegressionSimpleState': ('stochasticLogisticRegression', 'SimpleState'), 'stddevPopSimpleState': ('stddevPop', 'SimpleState'), 'maxIntersectionsSimpleState': ('maxIntersections', 'SimpleState'), 'sumWithOverflowSimpleState': ('sumWithOverflow', 'SimpleState'), 'sumMapSimpleState': ('sumMap', 'SimpleState'), 'maxSimpleState': ('max', 'SimpleState'), 'sumCountSimpleState': ('sumCount', 
'SimpleState'), 'minMapSimpleState': ('minMap', 'SimpleState'), 'quantilesSimpleState': ('quantiles', 'SimpleState'), 'meanZTestSimpleState': ('meanZTest', 'SimpleState'), 'uniqCombinedSimpleState': ('uniqCombined', 'SimpleState'), 'quantileExactSimpleState': ('quantileExact', 'SimpleState'), 'avgSimpleState': ('avg', 'SimpleState'), 'quantilesExactLowSimpleState': ('quantilesExactLow', 'SimpleState'), 'quantileTimingSimpleState': ('quantileTiming', 'SimpleState'), 'quantileGKSimpleState': ('quantileGK', 'SimpleState'), 'countSimpleState': ('count', 'SimpleState'), 'covarPopSimpleState': ('covarPop', 'SimpleState'), 'groupBitmapSimpleState': ('groupBitmap', 'SimpleState'), 'first_valueSimpleState': ('first_value', 'SimpleState'), 'quantileExactWeightedSimpleState': ('quantileExactWeighted', 'SimpleState'), 'intervalLengthSumSimpleState': ('intervalLengthSum', 'SimpleState'), 'uniqSimpleState': ('uniq', 'SimpleState'), 'quantilesExactWeightedSimpleState': ('quantilesExactWeighted', 'SimpleState'), 'quantilesTDigestWeightedSimpleState': ('quantilesTDigestWeighted', 'SimpleState'), 'topKWeightedSimpleState': ('topKWeighted', 'SimpleState'), 'quantilesTDigestSimpleState': ('quantilesTDigest', 'SimpleState'), 'groupBitXorSimpleState': ('groupBitXor', 'SimpleState'), 'quantilesGKSimpleState': ('quantilesGK', 'SimpleState'), 'skewPopSimpleState': ('skewPop', 'SimpleState'), 'mannWhitneyUTestSimpleState': ('mannWhitneyUTest', 'SimpleState'), 'quantileTDigestWeightedSimpleState': ('quantileTDigestWeighted', 'SimpleState'), 'quantilesDeterministicSimpleState': ('quantilesDeterministic', 'SimpleState'), 'medianSimpleState': ('median', 'SimpleState'), 'largestTriangleThreeBucketsSimpleState': ('largestTriangleThreeBuckets', 'SimpleState'), 'rankCorrSimpleState': ('rankCorr', 'SimpleState'), 'maxIntersectionsPositionSimpleState': ('maxIntersectionsPosition', 'SimpleState'), 'varSampSimpleState': ('varSamp', 'SimpleState'), 'last_valueSimpleState': ('last_value', 'SimpleState'), 'welchTTestSimpleState': ('welchTTest', 'SimpleState'), 'cramersVSimpleState': ('cramersV', 'SimpleState'), 'quantileDeterministicSimpleState': ('quantileDeterministic', 'SimpleState'), 'uniqCombined64SimpleState': ('uniqCombined64', 'SimpleState'), 'uniqThetaSimpleState': ('uniqTheta', 'SimpleState'), 'groupArrayInsertAtSimpleState': ('groupArrayInsertAt', 'SimpleState'), 'uniqExactSimpleState': ('uniqExact', 'SimpleState'), 'covarSampSimpleState': ('covarSamp', 'SimpleState'), 'quantilesInterpolatedWeightedSimpleState': ('quantilesInterpolatedWeighted', 'SimpleState'), 'groupBitmapOrSimpleState': ('groupBitmapOr', 'SimpleState'), 'groupBitAndSimpleState': ('groupBitAnd', 'SimpleState'), 'sumKahanSimpleState': ('sumKahan', 'SimpleState'), 'quantilesTimingWeightedSimpleState': ('quantilesTimingWeighted', 'SimpleState'), 'simpleLinearRegressionSimpleState': ('simpleLinearRegression', 'SimpleState'), 'groupUniqArraySimpleState': ('groupUniqArray', 'SimpleState'), 'kolmogorovSmirnovTestSimpleState': ('kolmogorovSmirnovTest', 'SimpleState'), 'minSimpleState': ('min', 'SimpleState'), 'argMinSimpleState': ('argMin', 'SimpleState'), 'corrSimpleState': ('corr', 'SimpleState'), 'anySimpleState': ('any', 'SimpleState'), 'quantileInterpolatedWeightedSimpleState': ('quantileInterpolatedWeighted', 'SimpleState'), 'groupArraySimpleState': ('groupArray', 'SimpleState'), 'groupArrayMovingSumSimpleState': ('groupArrayMovingSum', 'SimpleState'), 'groupArrayLastSimpleState': ('groupArrayLast', 'SimpleState'), 'quantileBFloat16WeightedSimpleState': 
('quantileBFloat16Weighted', 'SimpleState'), 'skewSampSimpleState': ('skewSamp', 'SimpleState'), 'stddevSampSimpleState': ('stddevSamp', 'SimpleState'), 'kurtSampSimpleState': ('kurtSamp', 'SimpleState'), 'uniqHLL12SimpleState': ('uniqHLL12', 'SimpleState'), 'boundingRatioSimpleState': ('boundingRatio', 'SimpleState'), 'quantileTDigestSimpleState': ('quantileTDigest', 'SimpleState'), 'theilsUSimpleState': ('theilsU', 'SimpleState'), 'sumSimpleState': ('sum', 'SimpleState'), 'quantileSimpleState': ('quantile', 'SimpleState'), 'quantilesBFloat16WeightedSimpleState': ('quantilesBFloat16Weighted', 'SimpleState'), 'quantilesExactSimpleState': ('quantilesExact', 'SimpleState'), 'stochasticLinearRegressionSimpleState': ('stochasticLinearRegression', 'SimpleState'), 'groupBitOrSimpleState': ('groupBitOr', 'SimpleState'), 'varPopSimpleState': ('varPop', 'SimpleState'), 'topKSimpleState': ('topK', 'SimpleState'), 'kurtPopSimpleState': ('kurtPop', 'SimpleState'), 'anyLastSimpleState': ('anyLast', 'SimpleState'), 'groupArrayMovingAvgSimpleState': ('groupArrayMovingAvg', 'SimpleState'), 'quantilesBFloat16SimpleState': ('quantilesBFloat16', 'SimpleState'), 'entropySimpleState': ('entropy', 'SimpleState'), 'quantileExactHighSimpleState': ('quantileExactHigh', 'SimpleState'), 'sparkBarSimpleState': ('sparkBar', 'SimpleState'), 'studentTTestSimpleState': ('studentTTest', 'SimpleState'), 'deltaSumTimestampSimpleState': ('deltaSumTimestamp', 'SimpleState'), 'quantilesExactHighSimpleState': ('quantilesExactHigh', 'SimpleState'), 'quantilesTimingSimpleState': ('quantilesTiming', 'SimpleState'), 'groupBitmapXorSimpleState': ('groupBitmapXor', 'SimpleState'), 'exponentialMovingAverageSimpleState': ('exponentialMovingAverage', 'SimpleState'), 'groupBitmapAndSimpleState': ('groupBitmapAnd', 'SimpleState'), 'contingencySimpleState': ('contingency', 'SimpleState'), 'categoricalInformationValueSimpleState': ('categoricalInformationValue', 'SimpleState'), 'maxMapSimpleState': ('maxMap', 'SimpleState'), 'quantileExactLowSimpleState': ('quantileExactLow', 'SimpleState'), 'avgWeightedSimpleState': ('avgWeighted', 'SimpleState'), 'anyHeavySimpleState': ('anyHeavy', 'SimpleState'), 'deltaSumSimpleState': ('deltaSum', 'SimpleState'), 'groupArraySampleState': ('groupArraySample', 'State'), 'cramersVBiasCorrectedState': ('cramersVBiasCorrected', 'State'), 'argMaxState': ('argMax', 'State'), 'quantileBFloat16State': ('quantileBFloat16', 'State'), 'quantileTimingWeightedState': ('quantileTimingWeighted', 'State'), 'stochasticLogisticRegressionState': ('stochasticLogisticRegression', 'State'), 'stddevPopState': ('stddevPop', 'State'), 'maxIntersectionsState': ('maxIntersections', 'State'), 'sumWithOverflowState': ('sumWithOverflow', 'State'), 'sumMapState': ('sumMap', 'State'), 'maxState': ('max', 'State'), 'sumCountState': ('sumCount', 'State'), 'minMapState': ('minMap', 'State'), 'quantilesState': ('quantiles', 'State'), 'meanZTestState': ('meanZTest', 'State'), 'uniqCombinedState': ('uniqCombined', 'State'), 'quantileExactState': ('quantileExact', 'State'), 'avgState': ('avg', 'State'), 'quantilesExactLowState': ('quantilesExactLow', 'State'), 'quantileTimingState': ('quantileTiming', 'State'), 'quantileGKState': ('quantileGK', 'State'), 'countState': ('count', 'State'), 'covarPopState': ('covarPop', 'State'), 'groupBitmapState': ('groupBitmap', 'State'), 'first_valueState': ('first_value', 'State'), 'quantileExactWeightedState': ('quantileExactWeighted', 'State'), 'intervalLengthSumState': ('intervalLengthSum', 'State'), 
'uniqState': ('uniq', 'State'), 'quantilesExactWeightedState': ('quantilesExactWeighted', 'State'), 'quantilesTDigestWeightedState': ('quantilesTDigestWeighted', 'State'), 'topKWeightedState': ('topKWeighted', 'State'), 'quantilesTDigestState': ('quantilesTDigest', 'State'), 'groupBitXorState': ('groupBitXor', 'State'), 'quantilesGKState': ('quantilesGK', 'State'), 'skewPopState': ('skewPop', 'State'), 'mannWhitneyUTestState': ('mannWhitneyUTest', 'State'), 'quantileTDigestWeightedState': ('quantileTDigestWeighted', 'State'), 'quantilesDeterministicState': ('quantilesDeterministic', 'State'), 'medianState': ('median', 'State'), 'largestTriangleThreeBucketsState': ('largestTriangleThreeBuckets', 'State'), 'rankCorrState': ('rankCorr', 'State'), 'maxIntersectionsPositionState': ('maxIntersectionsPosition', 'State'), 'varSampState': ('varSamp', 'State'), 'last_valueState': ('last_value', 'State'), 'welchTTestState': ('welchTTest', 'State'), 'cramersVState': ('cramersV', 'State'), 'quantileDeterministicState': ('quantileDeterministic', 'State'), 'uniqCombined64State': ('uniqCombined64', 'State'), 'uniqThetaState': ('uniqTheta', 'State'), 'groupArrayInsertAtState': ('groupArrayInsertAt', 'State'), 'uniqExactState': ('uniqExact', 'State'), 'covarSampState': ('covarSamp', 'State'), 'quantilesInterpolatedWeightedState': ('quantilesInterpolatedWeighted', 'State'), 'groupBitmapOrState': ('groupBitmapOr', 'State'), 'groupBitAndState': ('groupBitAnd', 'State'), 'sumKahanState': ('sumKahan', 'State'), 'quantilesTimingWeightedState': ('quantilesTimingWeighted', 'State'), 'simpleLinearRegressionState': ('simpleLinearRegression', 'State'), 'groupUniqArrayState': ('groupUniqArray', 'State'), 'kolmogorovSmirnovTestState': ('kolmogorovSmirnovTest', 'State'), 'minState': ('min', 'State'), 'argMinState': ('argMin', 'State'), 'corrState': ('corr', 'State'), 'anyState': ('any', 'State'), 'quantileInterpolatedWeightedState': ('quantileInterpolatedWeighted', 'State'), 'groupArrayState': ('groupArray', 'State'), 'groupArrayMovingSumState': ('groupArrayMovingSum', 'State'), 'groupArrayLastState': ('groupArrayLast', 'State'), 'quantileBFloat16WeightedState': ('quantileBFloat16Weighted', 'State'), 'skewSampState': ('skewSamp', 'State'), 'stddevSampState': ('stddevSamp', 'State'), 'kurtSampState': ('kurtSamp', 'State'), 'uniqHLL12State': ('uniqHLL12', 'State'), 'boundingRatioState': ('boundingRatio', 'State'), 'quantileTDigestState': ('quantileTDigest', 'State'), 'theilsUState': ('theilsU', 'State'), 'sumState': ('sum', 'State'), 'quantileState': ('quantile', 'State'), 'quantilesBFloat16WeightedState': ('quantilesBFloat16Weighted', 'State'), 'quantilesExactState': ('quantilesExact', 'State'), 'stochasticLinearRegressionState': ('stochasticLinearRegression', 'State'), 'groupBitOrState': ('groupBitOr', 'State'), 'varPopState': ('varPop', 'State'), 'topKState': ('topK', 'State'), 'kurtPopState': ('kurtPop', 'State'), 'anyLastState': ('anyLast', 'State'), 'groupArrayMovingAvgState': ('groupArrayMovingAvg', 'State'), 'quantilesBFloat16State': ('quantilesBFloat16', 'State'), 'entropyState': ('entropy', 'State'), 'quantileExactHighState': ('quantileExactHigh', 'State'), 'sparkBarState': ('sparkBar', 'State'), 'studentTTestState': ('studentTTest', 'State'), 'deltaSumTimestampState': ('deltaSumTimestamp', 'State'), 'quantilesExactHighState': ('quantilesExactHigh', 'State'), 'quantilesTimingState': ('quantilesTiming', 'State'), 'groupBitmapXorState': ('groupBitmapXor', 'State'), 'exponentialMovingAverageState': 
('exponentialMovingAverage', 'State'), 'groupBitmapAndState': ('groupBitmapAnd', 'State'), 'contingencyState': ('contingency', 'State'), 'categoricalInformationValueState': ('categoricalInformationValue', 'State'), 'maxMapState': ('maxMap', 'State'), 'quantileExactLowState': ('quantileExactLow', 'State'), 'avgWeightedState': ('avgWeighted', 'State'), 'anyHeavyState': ('anyHeavy', 'State'), 'deltaSumState': ('deltaSum', 'State'), 'groupArraySampleMerge': ('groupArraySample', 'Merge'), 'cramersVBiasCorrectedMerge': ('cramersVBiasCorrected', 'Merge'), 'argMaxMerge': ('argMax', 'Merge'), 'quantileBFloat16Merge': ('quantileBFloat16', 'Merge'), 'quantileTimingWeightedMerge': ('quantileTimingWeighted', 'Merge'), 'stochasticLogisticRegressionMerge': ('stochasticLogisticRegression', 'Merge'), 'stddevPopMerge': ('stddevPop', 'Merge'), 'maxIntersectionsMerge': ('maxIntersections', 'Merge'), 'sumWithOverflowMerge': ('sumWithOverflow', 'Merge'), 'sumMapMerge': ('sumMap', 'Merge'), 'maxMerge': ('max', 'Merge'), 'sumCountMerge': ('sumCount', 'Merge'), 'minMapMerge': ('minMap', 'Merge'), 'quantilesMerge': ('quantiles', 'Merge'), 'meanZTestMerge': ('meanZTest', 'Merge'), 'uniqCombinedMerge': ('uniqCombined', 'Merge'), 'quantileExactMerge': ('quantileExact', 'Merge'), 'avgMerge': ('avg', 'Merge'), 'quantilesExactLowMerge': ('quantilesExactLow', 'Merge'), 'quantileTimingMerge': ('quantileTiming', 'Merge'), 'quantileGKMerge': ('quantileGK', 'Merge'), 'countMerge': ('count', 'Merge'), 'covarPopMerge': ('covarPop', 'Merge'), 'groupBitmapMerge': ('groupBitmap', 'Merge'), 'first_valueMerge': ('first_value', 'Merge'), 'quantileExactWeightedMerge': ('quantileExactWeighted', 'Merge'), 'intervalLengthSumMerge': ('intervalLengthSum', 'Merge'), 'uniqMerge': ('uniq', 'Merge'), 'quantilesExactWeightedMerge': ('quantilesExactWeighted', 'Merge'), 'quantilesTDigestWeightedMerge': ('quantilesTDigestWeighted', 'Merge'), 'topKWeightedMerge': ('topKWeighted', 'Merge'), 'quantilesTDigestMerge': ('quantilesTDigest', 'Merge'), 'groupBitXorMerge': ('groupBitXor', 'Merge'), 'quantilesGKMerge': ('quantilesGK', 'Merge'), 'skewPopMerge': ('skewPop', 'Merge'), 'mannWhitneyUTestMerge': ('mannWhitneyUTest', 'Merge'), 'quantileTDigestWeightedMerge': ('quantileTDigestWeighted', 'Merge'), 'quantilesDeterministicMerge': ('quantilesDeterministic', 'Merge'), 'medianMerge': ('median', 'Merge'), 'largestTriangleThreeBucketsMerge': ('largestTriangleThreeBuckets', 'Merge'), 'rankCorrMerge': ('rankCorr', 'Merge'), 'maxIntersectionsPositionMerge': ('maxIntersectionsPosition', 'Merge'), 'varSampMerge': ('varSamp', 'Merge'), 'last_valueMerge': ('last_value', 'Merge'), 'welchTTestMerge': ('welchTTest', 'Merge'), 'cramersVMerge': ('cramersV', 'Merge'), 'quantileDeterministicMerge': ('quantileDeterministic', 'Merge'), 'uniqCombined64Merge': ('uniqCombined64', 'Merge'), 'uniqThetaMerge': ('uniqTheta', 'Merge'), 'groupArrayInsertAtMerge': ('groupArrayInsertAt', 'Merge'), 'uniqExactMerge': ('uniqExact', 'Merge'), 'covarSampMerge': ('covarSamp', 'Merge'), 'quantilesInterpolatedWeightedMerge': ('quantilesInterpolatedWeighted', 'Merge'), 'groupBitmapOrMerge': ('groupBitmapOr', 'Merge'), 'groupBitAndMerge': ('groupBitAnd', 'Merge'), 'sumKahanMerge': ('sumKahan', 'Merge'), 'quantilesTimingWeightedMerge': ('quantilesTimingWeighted', 'Merge'), 'simpleLinearRegressionMerge': ('simpleLinearRegression', 'Merge'), 'groupUniqArrayMerge': ('groupUniqArray', 'Merge'), 'kolmogorovSmirnovTestMerge': ('kolmogorovSmirnovTest', 'Merge'), 'minMerge': ('min', 'Merge'), 
'argMinMerge': ('argMin', 'Merge'), 'corrMerge': ('corr', 'Merge'), 'anyMerge': ('any', 'Merge'), 'quantileInterpolatedWeightedMerge': ('quantileInterpolatedWeighted', 'Merge'), 'groupArrayMerge': ('groupArray', 'Merge'), 'groupArrayMovingSumMerge': ('groupArrayMovingSum', 'Merge'), 'groupArrayLastMerge': ('groupArrayLast', 'Merge'), 'quantileBFloat16WeightedMerge': ('quantileBFloat16Weighted', 'Merge'), 'skewSampMerge': ('skewSamp', 'Merge'), 'stddevSampMerge': ('stddevSamp', 'Merge'), 'kurtSampMerge': ('kurtSamp', 'Merge'), 'uniqHLL12Merge': ('uniqHLL12', 'Merge'), 'boundingRatioMerge': ('boundingRatio', 'Merge'), 'quantileTDigestMerge': ('quantileTDigest', 'Merge'), 'theilsUMerge': ('theilsU', 'Merge'), 'sumMerge': ('sum', 'Merge'), 'quantileMerge': ('quantile', 'Merge'), 'quantilesBFloat16WeightedMerge': ('quantilesBFloat16Weighted', 'Merge'), 'quantilesExactMerge': ('quantilesExact', 'Merge'), 'stochasticLinearRegressionMerge': ('stochasticLinearRegression', 'Merge'), 'groupBitOrMerge': ('groupBitOr', 'Merge'), 'varPopMerge': ('varPop', 'Merge'), 'topKMerge': ('topK', 'Merge'), 'kurtPopMerge': ('kurtPop', 'Merge'), 'anyLastMerge': ('anyLast', 'Merge'), 'groupArrayMovingAvgMerge': ('groupArrayMovingAvg', 'Merge'), 'quantilesBFloat16Merge': ('quantilesBFloat16', 'Merge'), 'entropyMerge': ('entropy', 'Merge'), 'quantileExactHighMerge': ('quantileExactHigh', 'Merge'), 'sparkBarMerge': ('sparkBar', 'Merge'), 'studentTTestMerge': ('studentTTest', 'Merge'), 'deltaSumTimestampMerge': ('deltaSumTimestamp', 'Merge'), 'quantilesExactHighMerge': ('quantilesExactHigh', 'Merge'), 'quantilesTimingMerge': ('quantilesTiming', 'Merge'), 'groupBitmapXorMerge': ('groupBitmapXor', 'Merge'), 'exponentialMovingAverageMerge': ('exponentialMovingAverage', 'Merge'), 'groupBitmapAndMerge': ('groupBitmapAnd', 'Merge'), 'contingencyMerge': ('contingency', 'Merge'), 'categoricalInformationValueMerge': ('categoricalInformationValue', 'Merge'), 'maxMapMerge': ('maxMap', 'Merge'), 'quantileExactLowMerge': ('quantileExactLow', 'Merge'), 'avgWeightedMerge': ('avgWeighted', 'Merge'), 'anyHeavyMerge': ('anyHeavy', 'Merge'), 'deltaSumMerge': ('deltaSum', 'Merge'), 'groupArraySampleMergeState': ('groupArraySample', 'MergeState'), 'cramersVBiasCorrectedMergeState': ('cramersVBiasCorrected', 'MergeState'), 'argMaxMergeState': ('argMax', 'MergeState'), 'quantileBFloat16MergeState': ('quantileBFloat16', 'MergeState'), 'quantileTimingWeightedMergeState': ('quantileTimingWeighted', 'MergeState'), 'stochasticLogisticRegressionMergeState': ('stochasticLogisticRegression', 'MergeState'), 'stddevPopMergeState': ('stddevPop', 'MergeState'), 'maxIntersectionsMergeState': ('maxIntersections', 'MergeState'), 'sumWithOverflowMergeState': ('sumWithOverflow', 'MergeState'), 'sumMapMergeState': ('sumMap', 'MergeState'), 'maxMergeState': ('max', 'MergeState'), 'sumCountMergeState': ('sumCount', 'MergeState'), 'minMapMergeState': ('minMap', 'MergeState'), 'quantilesMergeState': ('quantiles', 'MergeState'), 'meanZTestMergeState': ('meanZTest', 'MergeState'), 'uniqCombinedMergeState': ('uniqCombined', 'MergeState'), 'quantileExactMergeState': ('quantileExact', 'MergeState'), 'avgMergeState': ('avg', 'MergeState'), 'quantilesExactLowMergeState': ('quantilesExactLow', 'MergeState'), 'quantileTimingMergeState': ('quantileTiming', 'MergeState'), 'quantileGKMergeState': ('quantileGK', 'MergeState'), 'countMergeState': ('count', 'MergeState'), 'covarPopMergeState': ('covarPop', 'MergeState'), 'groupBitmapMergeState': ('groupBitmap', 'MergeState'), 
'first_valueMergeState': ('first_value', 'MergeState'), 'quantileExactWeightedMergeState': ('quantileExactWeighted', 'MergeState'), 'intervalLengthSumMergeState': ('intervalLengthSum', 'MergeState'), 'uniqMergeState': ('uniq', 'MergeState'), 'quantilesExactWeightedMergeState': ('quantilesExactWeighted', 'MergeState'), 'quantilesTDigestWeightedMergeState': ('quantilesTDigestWeighted', 'MergeState'), 'topKWeightedMergeState': ('topKWeighted', 'MergeState'), 'quantilesTDigestMergeState': ('quantilesTDigest', 'MergeState'), 'groupBitXorMergeState': ('groupBitXor', 'MergeState'), 'quantilesGKMergeState': ('quantilesGK', 'MergeState'), 'skewPopMergeState': ('skewPop', 'MergeState'), 'mannWhitneyUTestMergeState': ('mannWhitneyUTest', 'MergeState'), 'quantileTDigestWeightedMergeState': ('quantileTDigestWeighted', 'MergeState'), 'quantilesDeterministicMergeState': ('quantilesDeterministic', 'MergeState'), 'medianMergeState': ('median', 'MergeState'), 'largestTriangleThreeBucketsMergeState': ('largestTriangleThreeBuckets', 'MergeState'), 'rankCorrMergeState': ('rankCorr', 'MergeState'), 'maxIntersectionsPositionMergeState': ('maxIntersectionsPosition', 'MergeState'), 'varSampMergeState': ('varSamp', 'MergeState'), 'last_valueMergeState': ('last_value', 'MergeState'), 'welchTTestMergeState': ('welchTTest', 'MergeState'), 'cramersVMergeState': ('cramersV', 'MergeState'), 'quantileDeterministicMergeState': ('quantileDeterministic', 'MergeState'), 'uniqCombined64MergeState': ('uniqCombined64', 'MergeState'), 'uniqThetaMergeState': ('uniqTheta', 'MergeState'), 'groupArrayInsertAtMergeState': ('groupArrayInsertAt', 'MergeState'), 'uniqExactMergeState': ('uniqExact', 'MergeState'), 'covarSampMergeState': ('covarSamp', 'MergeState'), 'quantilesInterpolatedWeightedMergeState': ('quantilesInterpolatedWeighted', 'MergeState'), 'groupBitmapOrMergeState': ('groupBitmapOr', 'MergeState'), 'groupBitAndMergeState': ('groupBitAnd', 'MergeState'), 'sumKahanMergeState': ('sumKahan', 'MergeState'), 'quantilesTimingWeightedMergeState': ('quantilesTimingWeighted', 'MergeState'), 'simpleLinearRegressionMergeState': ('simpleLinearRegression', 'MergeState'), 'groupUniqArrayMergeState': ('groupUniqArray', 'MergeState'), 'kolmogorovSmirnovTestMergeState': ('kolmogorovSmirnovTest', 'MergeState'), 'minMergeState': ('min', 'MergeState'), 'argMinMergeState': ('argMin', 'MergeState'), 'corrMergeState': ('corr', 'MergeState'), 'anyMergeState': ('any', 'MergeState'), 'quantileInterpolatedWeightedMergeState': ('quantileInterpolatedWeighted', 'MergeState'), 'groupArrayMergeState': ('groupArray', 'MergeState'), 'groupArrayMovingSumMergeState': ('groupArrayMovingSum', 'MergeState'), 'groupArrayLastMergeState': ('groupArrayLast', 'MergeState'), 'quantileBFloat16WeightedMergeState': ('quantileBFloat16Weighted', 'MergeState'), 'skewSampMergeState': ('skewSamp', 'MergeState'), 'stddevSampMergeState': ('stddevSamp', 'MergeState'), 'kurtSampMergeState': ('kurtSamp', 'MergeState'), 'uniqHLL12MergeState': ('uniqHLL12', 'MergeState'), 'boundingRatioMergeState': ('boundingRatio', 'MergeState'), 'quantileTDigestMergeState': ('quantileTDigest', 'MergeState'), 'theilsUMergeState': ('theilsU', 'MergeState'), 'sumMergeState': ('sum', 'MergeState'), 'quantileMergeState': ('quantile', 'MergeState'), 'quantilesBFloat16WeightedMergeState': ('quantilesBFloat16Weighted', 'MergeState'), 'quantilesExactMergeState': ('quantilesExact', 'MergeState'), 'stochasticLinearRegressionMergeState': ('stochasticLinearRegression', 'MergeState'), 'groupBitOrMergeState': 
('groupBitOr', 'MergeState'), 'varPopMergeState': ('varPop', 'MergeState'), 'topKMergeState': ('topK', 'MergeState'), 'kurtPopMergeState': ('kurtPop', 'MergeState'), 'anyLastMergeState': ('anyLast', 'MergeState'), 'groupArrayMovingAvgMergeState': ('groupArrayMovingAvg', 'MergeState'), 'quantilesBFloat16MergeState': ('quantilesBFloat16', 'MergeState'), 'entropyMergeState': ('entropy', 'MergeState'), 'quantileExactHighMergeState': ('quantileExactHigh', 'MergeState'), 'sparkBarMergeState': ('sparkBar', 'MergeState'), 'studentTTestMergeState': ('studentTTest', 'MergeState'), 'deltaSumTimestampMergeState': ('deltaSumTimestamp', 'MergeState'), 'quantilesExactHighMergeState': ('quantilesExactHigh', 'MergeState'), 'quantilesTimingMergeState': ('quantilesTiming', 'MergeState'), 'groupBitmapXorMergeState': ('groupBitmapXor', 'MergeState'), 'exponentialMovingAverageMergeState': ('exponentialMovingAverage', 'MergeState'), 'groupBitmapAndMergeState': ('groupBitmapAnd', 'MergeState'), 'contingencyMergeState': ('contingency', 'MergeState'), 'categoricalInformationValueMergeState': ('categoricalInformationValue', 'MergeState'), 'maxMapMergeState': ('maxMap', 'MergeState'), 'quantileExactLowMergeState': ('quantileExactLow', 'MergeState'), 'avgWeightedMergeState': ('avgWeighted', 'MergeState'), 'anyHeavyMergeState': ('anyHeavy', 'MergeState'), 'deltaSumMergeState': ('deltaSum', 'MergeState'), 'groupArraySampleForEach': ('groupArraySample', 'ForEach'), 'cramersVBiasCorrectedForEach': ('cramersVBiasCorrected', 'ForEach'), 'argMaxForEach': ('argMax', 'ForEach'), 'quantileBFloat16ForEach': ('quantileBFloat16', 'ForEach'), 'quantileTimingWeightedForEach': ('quantileTimingWeighted', 'ForEach'), 'stochasticLogisticRegressionForEach': ('stochasticLogisticRegression', 'ForEach'), 'stddevPopForEach': ('stddevPop', 'ForEach'), 'maxIntersectionsForEach': ('maxIntersections', 'ForEach'), 'sumWithOverflowForEach': ('sumWithOverflow', 'ForEach'), 'sumMapForEach': ('sumMap', 'ForEach'), 'maxForEach': ('max', 'ForEach'), 'sumCountForEach': ('sumCount', 'ForEach'), 'minMapForEach': ('minMap', 'ForEach'), 'quantilesForEach': ('quantiles', 'ForEach'), 'meanZTestForEach': ('meanZTest', 'ForEach'), 'uniqCombinedForEach': ('uniqCombined', 'ForEach'), 'quantileExactForEach': ('quantileExact', 'ForEach'), 'avgForEach': ('avg', 'ForEach'), 'quantilesExactLowForEach': ('quantilesExactLow', 'ForEach'), 'quantileTimingForEach': ('quantileTiming', 'ForEach'), 'quantileGKForEach': ('quantileGK', 'ForEach'), 'countForEach': ('count', 'ForEach'), 'covarPopForEach': ('covarPop', 'ForEach'), 'groupBitmapForEach': ('groupBitmap', 'ForEach'), 'first_valueForEach': ('first_value', 'ForEach'), 'quantileExactWeightedForEach': ('quantileExactWeighted', 'ForEach'), 'intervalLengthSumForEach': ('intervalLengthSum', 'ForEach'), 'uniqForEach': ('uniq', 'ForEach'), 'quantilesExactWeightedForEach': ('quantilesExactWeighted', 'ForEach'), 'quantilesTDigestWeightedForEach': ('quantilesTDigestWeighted', 'ForEach'), 'topKWeightedForEach': ('topKWeighted', 'ForEach'), 'quantilesTDigestForEach': ('quantilesTDigest', 'ForEach'), 'groupBitXorForEach': ('groupBitXor', 'ForEach'), 'quantilesGKForEach': ('quantilesGK', 'ForEach'), 'skewPopForEach': ('skewPop', 'ForEach'), 'mannWhitneyUTestForEach': ('mannWhitneyUTest', 'ForEach'), 'quantileTDigestWeightedForEach': ('quantileTDigestWeighted', 'ForEach'), 'quantilesDeterministicForEach': ('quantilesDeterministic', 'ForEach'), 'medianForEach': ('median', 'ForEach'), 'largestTriangleThreeBucketsForEach': 
('largestTriangleThreeBuckets', 'ForEach'), 'rankCorrForEach': ('rankCorr', 'ForEach'), 'maxIntersectionsPositionForEach': ('maxIntersectionsPosition', 'ForEach'), 'varSampForEach': ('varSamp', 'ForEach'), 'last_valueForEach': ('last_value', 'ForEach'), 'welchTTestForEach': ('welchTTest', 'ForEach'), 'cramersVForEach': ('cramersV', 'ForEach'), 'quantileDeterministicForEach': ('quantileDeterministic', 'ForEach'), 'uniqCombined64ForEach': ('uniqCombined64', 'ForEach'), 'uniqThetaForEach': ('uniqTheta', 'ForEach'), 'groupArrayInsertAtForEach': ('groupArrayInsertAt', 'ForEach'), 'uniqExactForEach': ('uniqExact', 'ForEach'), 'covarSampForEach': ('covarSamp', 'ForEach'), 'quantilesInterpolatedWeightedForEach': ('quantilesInterpolatedWeighted', 'ForEach'), 'groupBitmapOrForEach': ('groupBitmapOr', 'ForEach'), 'groupBitAndForEach': ('groupBitAnd', 'ForEach'), 'sumKahanForEach': ('sumKahan', 'ForEach'), 'quantilesTimingWeightedForEach': ('quantilesTimingWeighted', 'ForEach'), 'simpleLinearRegressionForEach': ('simpleLinearRegression', 'ForEach'), 'groupUniqArrayForEach': ('groupUniqArray', 'ForEach'), 'kolmogorovSmirnovTestForEach': ('kolmogorovSmirnovTest', 'ForEach'), 'minForEach': ('min', 'ForEach'), 'argMinForEach': ('argMin', 'ForEach'), 'corrForEach': ('corr', 'ForEach'), 'anyForEach': ('any', 'ForEach'), 'quantileInterpolatedWeightedForEach': ('quantileInterpolatedWeighted', 'ForEach'), 'groupArrayForEach': ('groupArray', 'ForEach'), 'groupArrayMovingSumForEach': ('groupArrayMovingSum', 'ForEach'), 'groupArrayLastForEach': ('groupArrayLast', 'ForEach'), 'quantileBFloat16WeightedForEach': ('quantileBFloat16Weighted', 'ForEach'), 'skewSampForEach': ('skewSamp', 'ForEach'), 'stddevSampForEach': ('stddevSamp', 'ForEach'), 'kurtSampForEach': ('kurtSamp', 'ForEach'), 'uniqHLL12ForEach': ('uniqHLL12', 'ForEach'), 'boundingRatioForEach': ('boundingRatio', 'ForEach'), 'quantileTDigestForEach': ('quantileTDigest', 'ForEach'), 'theilsUForEach': ('theilsU', 'ForEach'), 'sumForEach': ('sum', 'ForEach'), 'quantileForEach': ('quantile', 'ForEach'), 'quantilesBFloat16WeightedForEach': ('quantilesBFloat16Weighted', 'ForEach'), 'quantilesExactForEach': ('quantilesExact', 'ForEach'), 'stochasticLinearRegressionForEach': ('stochasticLinearRegression', 'ForEach'), 'groupBitOrForEach': ('groupBitOr', 'ForEach'), 'varPopForEach': ('varPop', 'ForEach'), 'topKForEach': ('topK', 'ForEach'), 'kurtPopForEach': ('kurtPop', 'ForEach'), 'anyLastForEach': ('anyLast', 'ForEach'), 'groupArrayMovingAvgForEach': ('groupArrayMovingAvg', 'ForEach'), 'quantilesBFloat16ForEach': ('quantilesBFloat16', 'ForEach'), 'entropyForEach': ('entropy', 'ForEach'), 'quantileExactHighForEach': ('quantileExactHigh', 'ForEach'), 'sparkBarForEach': ('sparkBar', 'ForEach'), 'studentTTestForEach': ('studentTTest', 'ForEach'), 'deltaSumTimestampForEach': ('deltaSumTimestamp', 'ForEach'), 'quantilesExactHighForEach': ('quantilesExactHigh', 'ForEach'), 'quantilesTimingForEach': ('quantilesTiming', 'ForEach'), 'groupBitmapXorForEach': ('groupBitmapXor', 'ForEach'), 'exponentialMovingAverageForEach': ('exponentialMovingAverage', 'ForEach'), 'groupBitmapAndForEach': ('groupBitmapAnd', 'ForEach'), 'contingencyForEach': ('contingency', 'ForEach'), 'categoricalInformationValueForEach': ('categoricalInformationValue', 'ForEach'), 'maxMapForEach': ('maxMap', 'ForEach'), 'quantileExactLowForEach': ('quantileExactLow', 'ForEach'), 'avgWeightedForEach': ('avgWeighted', 'ForEach'), 'anyHeavyForEach': ('anyHeavy', 'ForEach'), 'deltaSumForEach': ('deltaSum', 
'ForEach'), 'groupArraySampleDistinct': ('groupArraySample', 'Distinct'), 'cramersVBiasCorrectedDistinct': ('cramersVBiasCorrected', 'Distinct'), 'argMaxDistinct': ('argMax', 'Distinct'), 'quantileBFloat16Distinct': ('quantileBFloat16', 'Distinct'), 'quantileTimingWeightedDistinct': ('quantileTimingWeighted', 'Distinct'), 'stochasticLogisticRegressionDistinct': ('stochasticLogisticRegression', 'Distinct'), 'stddevPopDistinct': ('stddevPop', 'Distinct'), 'maxIntersectionsDistinct': ('maxIntersections', 'Distinct'), 'sumWithOverflowDistinct': ('sumWithOverflow', 'Distinct'), 'sumMapDistinct': ('sumMap', 'Distinct'), 'maxDistinct': ('max', 'Distinct'), 'sumCountDistinct': ('sumCount', 'Distinct'), 'minMapDistinct': ('minMap', 'Distinct'), 'quantilesDistinct': ('quantiles', 'Distinct'), 'meanZTestDistinct': ('meanZTest', 'Distinct'), 'uniqCombinedDistinct': ('uniqCombined', 'Distinct'), 'quantileExactDistinct': ('quantileExact', 'Distinct'), 'avgDistinct': ('avg', 'Distinct'), 'quantilesExactLowDistinct': ('quantilesExactLow', 'Distinct'), 'quantileTimingDistinct': ('quantileTiming', 'Distinct'), 'quantileGKDistinct': ('quantileGK', 'Distinct'), 'countDistinct': ('count', 'Distinct'), 'covarPopDistinct': ('covarPop', 'Distinct'), 'groupBitmapDistinct': ('groupBitmap', 'Distinct'), 'first_valueDistinct': ('first_value', 'Distinct'), 'quantileExactWeightedDistinct': ('quantileExactWeighted', 'Distinct'), 'intervalLengthSumDistinct': ('intervalLengthSum', 'Distinct'), 'uniqDistinct': ('uniq', 'Distinct'), 'quantilesExactWeightedDistinct': ('quantilesExactWeighted', 'Distinct'), 'quantilesTDigestWeightedDistinct': ('quantilesTDigestWeighted', 'Distinct'), 'topKWeightedDistinct': ('topKWeighted', 'Distinct'), 'quantilesTDigestDistinct': ('quantilesTDigest', 'Distinct'), 'groupBitXorDistinct': ('groupBitXor', 'Distinct'), 'quantilesGKDistinct': ('quantilesGK', 'Distinct'), 'skewPopDistinct': ('skewPop', 'Distinct'), 'mannWhitneyUTestDistinct': ('mannWhitneyUTest', 'Distinct'), 'quantileTDigestWeightedDistinct': ('quantileTDigestWeighted', 'Distinct'), 'quantilesDeterministicDistinct': ('quantilesDeterministic', 'Distinct'), 'medianDistinct': ('median', 'Distinct'), 'largestTriangleThreeBucketsDistinct': ('largestTriangleThreeBuckets', 'Distinct'), 'rankCorrDistinct': ('rankCorr', 'Distinct'), 'maxIntersectionsPositionDistinct': ('maxIntersectionsPosition', 'Distinct'), 'varSampDistinct': ('varSamp', 'Distinct'), 'last_valueDistinct': ('last_value', 'Distinct'), 'welchTTestDistinct': ('welchTTest', 'Distinct'), 'cramersVDistinct': ('cramersV', 'Distinct'), 'quantileDeterministicDistinct': ('quantileDeterministic', 'Distinct'), 'uniqCombined64Distinct': ('uniqCombined64', 'Distinct'), 'uniqThetaDistinct': ('uniqTheta', 'Distinct'), 'groupArrayInsertAtDistinct': ('groupArrayInsertAt', 'Distinct'), 'uniqExactDistinct': ('uniqExact', 'Distinct'), 'covarSampDistinct': ('covarSamp', 'Distinct'), 'quantilesInterpolatedWeightedDistinct': ('quantilesInterpolatedWeighted', 'Distinct'), 'groupBitmapOrDistinct': ('groupBitmapOr', 'Distinct'), 'groupBitAndDistinct': ('groupBitAnd', 'Distinct'), 'sumKahanDistinct': ('sumKahan', 'Distinct'), 'quantilesTimingWeightedDistinct': ('quantilesTimingWeighted', 'Distinct'), 'simpleLinearRegressionDistinct': ('simpleLinearRegression', 'Distinct'), 'groupUniqArrayDistinct': ('groupUniqArray', 'Distinct'), 'kolmogorovSmirnovTestDistinct': ('kolmogorovSmirnovTest', 'Distinct'), 'minDistinct': ('min', 'Distinct'), 'argMinDistinct': ('argMin', 'Distinct'), 'corrDistinct': 
('corr', 'Distinct'), 'anyDistinct': ('any', 'Distinct'), 'quantileInterpolatedWeightedDistinct': ('quantileInterpolatedWeighted', 'Distinct'), 'groupArrayDistinct': ('groupArray', 'Distinct'), 'groupArrayMovingSumDistinct': ('groupArrayMovingSum', 'Distinct'), 'groupArrayLastDistinct': ('groupArrayLast', 'Distinct'), 'quantileBFloat16WeightedDistinct': ('quantileBFloat16Weighted', 'Distinct'), 'skewSampDistinct': ('skewSamp', 'Distinct'), 'stddevSampDistinct': ('stddevSamp', 'Distinct'), 'kurtSampDistinct': ('kurtSamp', 'Distinct'), 'uniqHLL12Distinct': ('uniqHLL12', 'Distinct'), 'boundingRatioDistinct': ('boundingRatio', 'Distinct'), 'quantileTDigestDistinct': ('quantileTDigest', 'Distinct'), 'theilsUDistinct': ('theilsU', 'Distinct'), 'sumDistinct': ('sum', 'Distinct'), 'quantileDistinct': ('quantile', 'Distinct'), 'quantilesBFloat16WeightedDistinct': ('quantilesBFloat16Weighted', 'Distinct'), 'quantilesExactDistinct': ('quantilesExact', 'Distinct'), 'stochasticLinearRegressionDistinct': ('stochasticLinearRegression', 'Distinct'), 'groupBitOrDistinct': ('groupBitOr', 'Distinct'), 'varPopDistinct': ('varPop', 'Distinct'), 'topKDistinct': ('topK', 'Distinct'), 'kurtPopDistinct': ('kurtPop', 'Distinct'), 'anyLastDistinct': ('anyLast', 'Distinct'), 'groupArrayMovingAvgDistinct': ('groupArrayMovingAvg', 'Distinct'), 'quantilesBFloat16Distinct': ('quantilesBFloat16', 'Distinct'), 'entropyDistinct': ('entropy', 'Distinct'), 'quantileExactHighDistinct': ('quantileExactHigh', 'Distinct'), 'sparkBarDistinct': ('sparkBar', 'Distinct'), 'studentTTestDistinct': ('studentTTest', 'Distinct'), 'deltaSumTimestampDistinct': ('deltaSumTimestamp', 'Distinct'), 'quantilesExactHighDistinct': ('quantilesExactHigh', 'Distinct'), 'quantilesTimingDistinct': ('quantilesTiming', 'Distinct'), 'groupBitmapXorDistinct': ('groupBitmapXor', 'Distinct'), 'exponentialMovingAverageDistinct': ('exponentialMovingAverage', 'Distinct'), 'groupBitmapAndDistinct': ('groupBitmapAnd', 'Distinct'), 'contingencyDistinct': ('contingency', 'Distinct'), 'categoricalInformationValueDistinct': ('categoricalInformationValue', 'Distinct'), 'maxMapDistinct': ('maxMap', 'Distinct'), 'quantileExactLowDistinct': ('quantileExactLow', 'Distinct'), 'avgWeightedDistinct': ('avgWeighted', 'Distinct'), 'anyHeavyDistinct': ('anyHeavy', 'Distinct'), 'deltaSumDistinct': ('deltaSum', 'Distinct'), 'groupArraySampleOrDefault': ('groupArraySample', 'OrDefault'), 'cramersVBiasCorrectedOrDefault': ('cramersVBiasCorrected', 'OrDefault'), 'argMaxOrDefault': ('argMax', 'OrDefault'), 'quantileBFloat16OrDefault': ('quantileBFloat16', 'OrDefault'), 'quantileTimingWeightedOrDefault': ('quantileTimingWeighted', 'OrDefault'), 'stochasticLogisticRegressionOrDefault': ('stochasticLogisticRegression', 'OrDefault'), 'stddevPopOrDefault': ('stddevPop', 'OrDefault'), 'maxIntersectionsOrDefault': ('maxIntersections', 'OrDefault'), 'sumWithOverflowOrDefault': ('sumWithOverflow', 'OrDefault'), 'sumMapOrDefault': ('sumMap', 'OrDefault'), 'maxOrDefault': ('max', 'OrDefault'), 'sumCountOrDefault': ('sumCount', 'OrDefault'), 'minMapOrDefault': ('minMap', 'OrDefault'), 'quantilesOrDefault': ('quantiles', 'OrDefault'), 'meanZTestOrDefault': ('meanZTest', 'OrDefault'), 'uniqCombinedOrDefault': ('uniqCombined', 'OrDefault'), 'quantileExactOrDefault': ('quantileExact', 'OrDefault'), 'avgOrDefault': ('avg', 'OrDefault'), 'quantilesExactLowOrDefault': ('quantilesExactLow', 'OrDefault'), 'quantileTimingOrDefault': ('quantileTiming', 'OrDefault'), 'quantileGKOrDefault': ('quantileGK', 
'OrDefault'), 'countOrDefault': ('count', 'OrDefault'), 'covarPopOrDefault': ('covarPop', 'OrDefault'), 'groupBitmapOrDefault': ('groupBitmap', 'OrDefault'), 'first_valueOrDefault': ('first_value', 'OrDefault'), 'quantileExactWeightedOrDefault': ('quantileExactWeighted', 'OrDefault'), 'intervalLengthSumOrDefault': ('intervalLengthSum', 'OrDefault'), 'uniqOrDefault': ('uniq', 'OrDefault'), 'quantilesExactWeightedOrDefault': ('quantilesExactWeighted', 'OrDefault'), 'quantilesTDigestWeightedOrDefault': ('quantilesTDigestWeighted', 'OrDefault'), 'topKWeightedOrDefault': ('topKWeighted', 'OrDefault'), 'quantilesTDigestOrDefault': ('quantilesTDigest', 'OrDefault'), 'groupBitXorOrDefault': ('groupBitXor', 'OrDefault'), 'quantilesGKOrDefault': ('quantilesGK', 'OrDefault'), 'skewPopOrDefault': ('skewPop', 'OrDefault'), 'mannWhitneyUTestOrDefault': ('mannWhitneyUTest', 'OrDefault'), 'quantileTDigestWeightedOrDefault': ('quantileTDigestWeighted', 'OrDefault'), 'quantilesDeterministicOrDefault': ('quantilesDeterministic', 'OrDefault'), 'medianOrDefault': ('median', 'OrDefault'), 'largestTriangleThreeBucketsOrDefault': ('largestTriangleThreeBuckets', 'OrDefault'), 'rankCorrOrDefault': ('rankCorr', 'OrDefault'), 'maxIntersectionsPositionOrDefault': ('maxIntersectionsPosition', 'OrDefault'), 'varSampOrDefault': ('varSamp', 'OrDefault'), 'last_valueOrDefault': ('last_value', 'OrDefault'), 'welchTTestOrDefault': ('welchTTest', 'OrDefault'), 'cramersVOrDefault': ('cramersV', 'OrDefault'), 'quantileDeterministicOrDefault': ('quantileDeterministic', 'OrDefault'), 'uniqCombined64OrDefault': ('uniqCombined64', 'OrDefault'), 'uniqThetaOrDefault': ('uniqTheta', 'OrDefault'), 'groupArrayInsertAtOrDefault': ('groupArrayInsertAt', 'OrDefault'), 'uniqExactOrDefault': ('uniqExact', 'OrDefault'), 'covarSampOrDefault': ('covarSamp', 'OrDefault'), 'quantilesInterpolatedWeightedOrDefault': ('quantilesInterpolatedWeighted', 'OrDefault'), 'groupBitmapOrOrDefault': ('groupBitmapOr', 'OrDefault'), 'groupBitAndOrDefault': ('groupBitAnd', 'OrDefault'), 'sumKahanOrDefault': ('sumKahan', 'OrDefault'), 'quantilesTimingWeightedOrDefault': ('quantilesTimingWeighted', 'OrDefault'), 'simpleLinearRegressionOrDefault': ('simpleLinearRegression', 'OrDefault'), 'groupUniqArrayOrDefault': ('groupUniqArray', 'OrDefault'), 'kolmogorovSmirnovTestOrDefault': ('kolmogorovSmirnovTest', 'OrDefault'), 'minOrDefault': ('min', 'OrDefault'), 'argMinOrDefault': ('argMin', 'OrDefault'), 'corrOrDefault': ('corr', 'OrDefault'), 'anyOrDefault': ('any', 'OrDefault'), 'quantileInterpolatedWeightedOrDefault': ('quantileInterpolatedWeighted', 'OrDefault'), 'groupArrayOrDefault': ('groupArray', 'OrDefault'), 'groupArrayMovingSumOrDefault': ('groupArrayMovingSum', 'OrDefault'), 'groupArrayLastOrDefault': ('groupArrayLast', 'OrDefault'), 'quantileBFloat16WeightedOrDefault': ('quantileBFloat16Weighted', 'OrDefault'), 'skewSampOrDefault': ('skewSamp', 'OrDefault'), 'stddevSampOrDefault': ('stddevSamp', 'OrDefault'), 'kurtSampOrDefault': ('kurtSamp', 'OrDefault'), 'uniqHLL12OrDefault': ('uniqHLL12', 'OrDefault'), 'boundingRatioOrDefault': ('boundingRatio', 'OrDefault'), 'quantileTDigestOrDefault': ('quantileTDigest', 'OrDefault'), 'theilsUOrDefault': ('theilsU', 'OrDefault'), 'sumOrDefault': ('sum', 'OrDefault'), 'quantileOrDefault': ('quantile', 'OrDefault'), 'quantilesBFloat16WeightedOrDefault': ('quantilesBFloat16Weighted', 'OrDefault'), 'quantilesExactOrDefault': ('quantilesExact', 'OrDefault'), 'stochasticLinearRegressionOrDefault': 
('stochasticLinearRegression', 'OrDefault'), 'groupBitOrOrDefault': ('groupBitOr', 'OrDefault'), 'varPopOrDefault': ('varPop', 'OrDefault'), 'topKOrDefault': ('topK', 'OrDefault'), 'kurtPopOrDefault': ('kurtPop', 'OrDefault'), 'anyLastOrDefault': ('anyLast', 'OrDefault'), 'groupArrayMovingAvgOrDefault': ('groupArrayMovingAvg', 'OrDefault'), 'quantilesBFloat16OrDefault': ('quantilesBFloat16', 'OrDefault'), 'entropyOrDefault': ('entropy', 'OrDefault'), 'quantileExactHighOrDefault': ('quantileExactHigh', 'OrDefault'), 'sparkBarOrDefault': ('sparkBar', 'OrDefault'), 'studentTTestOrDefault': ('studentTTest', 'OrDefault'), 'deltaSumTimestampOrDefault': ('deltaSumTimestamp', 'OrDefault'), 'quantilesExactHighOrDefault': ('quantilesExactHigh', 'OrDefault'), 'quantilesTimingOrDefault': ('quantilesTiming', 'OrDefault'), 'groupBitmapXorOrDefault': ('groupBitmapXor', 'OrDefault'), 'exponentialMovingAverageOrDefault': ('exponentialMovingAverage', 'OrDefault'), 'groupBitmapAndOrDefault': ('groupBitmapAnd', 'OrDefault'), 'contingencyOrDefault': ('contingency', 'OrDefault'), 'categoricalInformationValueOrDefault': ('categoricalInformationValue', 'OrDefault'), 'maxMapOrDefault': ('maxMap', 'OrDefault'), 'quantileExactLowOrDefault': ('quantileExactLow', 'OrDefault'), 'avgWeightedOrDefault': ('avgWeighted', 'OrDefault'), 'anyHeavyOrDefault': ('anyHeavy', 'OrDefault'), 'deltaSumOrDefault': ('deltaSum', 'OrDefault'), 'groupArraySampleOrNull': ('groupArraySample', 'OrNull'), 'cramersVBiasCorrectedOrNull': ('cramersVBiasCorrected', 'OrNull'), 'argMaxOrNull': ('argMax', 'OrNull'), 'quantileBFloat16OrNull': ('quantileBFloat16', 'OrNull'), 'quantileTimingWeightedOrNull': ('quantileTimingWeighted', 'OrNull'), 'stochasticLogisticRegressionOrNull': ('stochasticLogisticRegression', 'OrNull'), 'stddevPopOrNull': ('stddevPop', 'OrNull'), 'maxIntersectionsOrNull': ('maxIntersections', 'OrNull'), 'sumWithOverflowOrNull': ('sumWithOverflow', 'OrNull'), 'sumMapOrNull': ('sumMap', 'OrNull'), 'maxOrNull': ('max', 'OrNull'), 'sumCountOrNull': ('sumCount', 'OrNull'), 'minMapOrNull': ('minMap', 'OrNull'), 'quantilesOrNull': ('quantiles', 'OrNull'), 'meanZTestOrNull': ('meanZTest', 'OrNull'), 'uniqCombinedOrNull': ('uniqCombined', 'OrNull'), 'quantileExactOrNull': ('quantileExact', 'OrNull'), 'avgOrNull': ('avg', 'OrNull'), 'quantilesExactLowOrNull': ('quantilesExactLow', 'OrNull'), 'quantileTimingOrNull': ('quantileTiming', 'OrNull'), 'quantileGKOrNull': ('quantileGK', 'OrNull'), 'countOrNull': ('count', 'OrNull'), 'covarPopOrNull': ('covarPop', 'OrNull'), 'groupBitmapOrNull': ('groupBitmap', 'OrNull'), 'first_valueOrNull': ('first_value', 'OrNull'), 'quantileExactWeightedOrNull': ('quantileExactWeighted', 'OrNull'), 'intervalLengthSumOrNull': ('intervalLengthSum', 'OrNull'), 'uniqOrNull': ('uniq', 'OrNull'), 'quantilesExactWeightedOrNull': ('quantilesExactWeighted', 'OrNull'), 'quantilesTDigestWeightedOrNull': ('quantilesTDigestWeighted', 'OrNull'), 'topKWeightedOrNull': ('topKWeighted', 'OrNull'), 'quantilesTDigestOrNull': ('quantilesTDigest', 'OrNull'), 'groupBitXorOrNull': ('groupBitXor', 'OrNull'), 'quantilesGKOrNull': ('quantilesGK', 'OrNull'), 'skewPopOrNull': ('skewPop', 'OrNull'), 'mannWhitneyUTestOrNull': ('mannWhitneyUTest', 'OrNull'), 'quantileTDigestWeightedOrNull': ('quantileTDigestWeighted', 'OrNull'), 'quantilesDeterministicOrNull': ('quantilesDeterministic', 'OrNull'), 'medianOrNull': ('median', 'OrNull'), 'largestTriangleThreeBucketsOrNull': ('largestTriangleThreeBuckets', 'OrNull'), 'rankCorrOrNull': 
('rankCorr', 'OrNull'), 'maxIntersectionsPositionOrNull': ('maxIntersectionsPosition', 'OrNull'), 'varSampOrNull': ('varSamp', 'OrNull'), 'last_valueOrNull': ('last_value', 'OrNull'), 'welchTTestOrNull': ('welchTTest', 'OrNull'), 'cramersVOrNull': ('cramersV', 'OrNull'), 'quantileDeterministicOrNull': ('quantileDeterministic', 'OrNull'), 'uniqCombined64OrNull': ('uniqCombined64', 'OrNull'), 'uniqThetaOrNull': ('uniqTheta', 'OrNull'), 'groupArrayInsertAtOrNull': ('groupArrayInsertAt', 'OrNull'), 'uniqExactOrNull': ('uniqExact', 'OrNull'), 'covarSampOrNull': ('covarSamp', 'OrNull'), 'quantilesInterpolatedWeightedOrNull': ('quantilesInterpolatedWeighted', 'OrNull'), 'groupBitmapOrOrNull': ('groupBitmapOr', 'OrNull'), 'groupBitAndOrNull': ('groupBitAnd', 'OrNull'), 'sumKahanOrNull': ('sumKahan', 'OrNull'), 'quantilesTimingWeightedOrNull': ('quantilesTimingWeighted', 'OrNull'), 'simpleLinearRegressionOrNull': ('simpleLinearRegression', 'OrNull'), 'groupUniqArrayOrNull': ('groupUniqArray', 'OrNull'), 'kolmogorovSmirnovTestOrNull': ('kolmogorovSmirnovTest', 'OrNull'), 'minOrNull': ('min', 'OrNull'), 'argMinOrNull': ('argMin', 'OrNull'), 'corrOrNull': ('corr', 'OrNull'), 'anyOrNull': ('any', 'OrNull'), 'quantileInterpolatedWeightedOrNull': ('quantileInterpolatedWeighted', 'OrNull'), 'groupArrayOrNull': ('groupArray', 'OrNull'), 'groupArrayMovingSumOrNull': ('groupArrayMovingSum', 'OrNull'), 'groupArrayLastOrNull': ('groupArrayLast', 'OrNull'), 'quantileBFloat16WeightedOrNull': ('quantileBFloat16Weighted', 'OrNull'), 'skewSampOrNull': ('skewSamp', 'OrNull'), 'stddevSampOrNull': ('stddevSamp', 'OrNull'), 'kurtSampOrNull': ('kurtSamp', 'OrNull'), 'uniqHLL12OrNull': ('uniqHLL12', 'OrNull'), 'boundingRatioOrNull': ('boundingRatio', 'OrNull'), 'quantileTDigestOrNull': ('quantileTDigest', 'OrNull'), 'theilsUOrNull': ('theilsU', 'OrNull'), 'sumOrNull': ('sum', 'OrNull'), 'quantileOrNull': ('quantile', 'OrNull'), 'quantilesBFloat16WeightedOrNull': ('quantilesBFloat16Weighted', 'OrNull'), 'quantilesExactOrNull': ('quantilesExact', 'OrNull'), 'stochasticLinearRegressionOrNull': ('stochasticLinearRegression', 'OrNull'), 'groupBitOrOrNull': ('groupBitOr', 'OrNull'), 'varPopOrNull': ('varPop', 'OrNull'), 'topKOrNull': ('topK', 'OrNull'), 'kurtPopOrNull': ('kurtPop', 'OrNull'), 'anyLastOrNull': ('anyLast', 'OrNull'), 'groupArrayMovingAvgOrNull': ('groupArrayMovingAvg', 'OrNull'), 'quantilesBFloat16OrNull': ('quantilesBFloat16', 'OrNull'), 'entropyOrNull': ('entropy', 'OrNull'), 'quantileExactHighOrNull': ('quantileExactHigh', 'OrNull'), 'sparkBarOrNull': ('sparkBar', 'OrNull'), 'studentTTestOrNull': ('studentTTest', 'OrNull'), 'deltaSumTimestampOrNull': ('deltaSumTimestamp', 'OrNull'), 'quantilesExactHighOrNull': ('quantilesExactHigh', 'OrNull'), 'quantilesTimingOrNull': ('quantilesTiming', 'OrNull'), 'groupBitmapXorOrNull': ('groupBitmapXor', 'OrNull'), 'exponentialMovingAverageOrNull': ('exponentialMovingAverage', 'OrNull'), 'groupBitmapAndOrNull': ('groupBitmapAnd', 'OrNull'), 'contingencyOrNull': ('contingency', 'OrNull'), 'categoricalInformationValueOrNull': ('categoricalInformationValue', 'OrNull'), 'maxMapOrNull': ('maxMap', 'OrNull'), 'quantileExactLowOrNull': ('quantileExactLow', 'OrNull'), 'avgWeightedOrNull': ('avgWeighted', 'OrNull'), 'anyHeavyOrNull': ('anyHeavy', 'OrNull'), 'deltaSumOrNull': ('deltaSum', 'OrNull'), 'groupArraySampleResample': ('groupArraySample', 'Resample'), 'cramersVBiasCorrectedResample': ('cramersVBiasCorrected', 'Resample'), 'argMaxResample': ('argMax', 'Resample'), 
'quantileBFloat16Resample': ('quantileBFloat16', 'Resample'), 'quantileTimingWeightedResample': ('quantileTimingWeighted', 'Resample'), 'stochasticLogisticRegressionResample': ('stochasticLogisticRegression', 'Resample'), 'stddevPopResample': ('stddevPop', 'Resample'), 'maxIntersectionsResample': ('maxIntersections', 'Resample'), 'sumWithOverflowResample': ('sumWithOverflow', 'Resample'), 'sumMapResample': ('sumMap', 'Resample'), 'maxResample': ('max', 'Resample'), 'sumCountResample': ('sumCount', 'Resample'), 'minMapResample': ('minMap', 'Resample'), 'quantilesResample': ('quantiles', 'Resample'), 'meanZTestResample': ('meanZTest', 'Resample'), 'uniqCombinedResample': ('uniqCombined', 'Resample'), 'quantileExactResample': ('quantileExact', 'Resample'), 'avgResample': ('avg', 'Resample'), 'quantilesExactLowResample': ('quantilesExactLow', 'Resample'), 'quantileTimingResample': ('quantileTiming', 'Resample'), 'quantileGKResample': ('quantileGK', 'Resample'), 'countResample': ('count', 'Resample'), 'covarPopResample': ('covarPop', 'Resample'), 'groupBitmapResample': ('groupBitmap', 'Resample'), 'first_valueResample': ('first_value', 'Resample'), 'quantileExactWeightedResample': ('quantileExactWeighted', 'Resample'), 'intervalLengthSumResample': ('intervalLengthSum', 'Resample'), 'uniqResample': ('uniq', 'Resample'), 'quantilesExactWeightedResample': ('quantilesExactWeighted', 'Resample'), 'quantilesTDigestWeightedResample': ('quantilesTDigestWeighted', 'Resample'), 'topKWeightedResample': ('topKWeighted', 'Resample'), 'quantilesTDigestResample': ('quantilesTDigest', 'Resample'), 'groupBitXorResample': ('groupBitXor', 'Resample'), 'quantilesGKResample': ('quantilesGK', 'Resample'), 'skewPopResample': ('skewPop', 'Resample'), 'mannWhitneyUTestResample': ('mannWhitneyUTest', 'Resample'), 'quantileTDigestWeightedResample': ('quantileTDigestWeighted', 'Resample'), 'quantilesDeterministicResample': ('quantilesDeterministic', 'Resample'), 'medianResample': ('median', 'Resample'), 'largestTriangleThreeBucketsResample': ('largestTriangleThreeBuckets', 'Resample'), 'rankCorrResample': ('rankCorr', 'Resample'), 'maxIntersectionsPositionResample': ('maxIntersectionsPosition', 'Resample'), 'varSampResample': ('varSamp', 'Resample'), 'last_valueResample': ('last_value', 'Resample'), 'welchTTestResample': ('welchTTest', 'Resample'), 'cramersVResample': ('cramersV', 'Resample'), 'quantileDeterministicResample': ('quantileDeterministic', 'Resample'), 'uniqCombined64Resample': ('uniqCombined64', 'Resample'), 'uniqThetaResample': ('uniqTheta', 'Resample'), 'groupArrayInsertAtResample': ('groupArrayInsertAt', 'Resample'), 'uniqExactResample': ('uniqExact', 'Resample'), 'covarSampResample': ('covarSamp', 'Resample'), 'quantilesInterpolatedWeightedResample': ('quantilesInterpolatedWeighted', 'Resample'), 'groupBitmapOrResample': ('groupBitmapOr', 'Resample'), 'groupBitAndResample': ('groupBitAnd', 'Resample'), 'sumKahanResample': ('sumKahan', 'Resample'), 'quantilesTimingWeightedResample': ('quantilesTimingWeighted', 'Resample'), 'simpleLinearRegressionResample': ('simpleLinearRegression', 'Resample'), 'groupUniqArrayResample': ('groupUniqArray', 'Resample'), 'kolmogorovSmirnovTestResample': ('kolmogorovSmirnovTest', 'Resample'), 'minResample': ('min', 'Resample'), 'argMinResample': ('argMin', 'Resample'), 'corrResample': ('corr', 'Resample'), 'anyResample': ('any', 'Resample'), 'quantileInterpolatedWeightedResample': ('quantileInterpolatedWeighted', 'Resample'), 'groupArrayResample': ('groupArray', 'Resample'), 
'groupArrayMovingSumResample': ('groupArrayMovingSum', 'Resample'), 'groupArrayLastResample': ('groupArrayLast', 'Resample'), 'quantileBFloat16WeightedResample': ('quantileBFloat16Weighted', 'Resample'), 'skewSampResample': ('skewSamp', 'Resample'), 'stddevSampResample': ('stddevSamp', 'Resample'), 'kurtSampResample': ('kurtSamp', 'Resample'), 'uniqHLL12Resample': ('uniqHLL12', 'Resample'), 'boundingRatioResample': ('boundingRatio', 'Resample'), 'quantileTDigestResample': ('quantileTDigest', 'Resample'), 'theilsUResample': ('theilsU', 'Resample'), 'sumResample': ('sum', 'Resample'), 'quantileResample': ('quantile', 'Resample'), 'quantilesBFloat16WeightedResample': ('quantilesBFloat16Weighted', 'Resample'), 'quantilesExactResample': ('quantilesExact', 'Resample'), 'stochasticLinearRegressionResample': ('stochasticLinearRegression', 'Resample'), 'groupBitOrResample': ('groupBitOr', 'Resample'), 'varPopResample': ('varPop', 'Resample'), 'topKResample': ('topK', 'Resample'), 'kurtPopResample': ('kurtPop', 'Resample'), 'anyLastResample': ('anyLast', 'Resample'), 'groupArrayMovingAvgResample': ('groupArrayMovingAvg', 'Resample'), 'quantilesBFloat16Resample': ('quantilesBFloat16', 'Resample'), 'entropyResample': ('entropy', 'Resample'), 'quantileExactHighResample': ('quantileExactHigh', 'Resample'), 'sparkBarResample': ('sparkBar', 'Resample'), 'studentTTestResample': ('studentTTest', 'Resample'), 'deltaSumTimestampResample': ('deltaSumTimestamp', 'Resample'), 'quantilesExactHighResample': ('quantilesExactHigh', 'Resample'), 'quantilesTimingResample': ('quantilesTiming', 'Resample'), 'groupBitmapXorResample': ('groupBitmapXor', 'Resample'), 'exponentialMovingAverageResample': ('exponentialMovingAverage', 'Resample'), 'groupBitmapAndResample': ('groupBitmapAnd', 'Resample'), 'contingencyResample': ('contingency', 'Resample'), 'categoricalInformationValueResample': ('categoricalInformationValue', 'Resample'), 'maxMapResample': ('maxMap', 'Resample'), 'quantileExactLowResample': ('quantileExactLow', 'Resample'), 'avgWeightedResample': ('avgWeighted', 'Resample'), 'anyHeavyResample': ('anyHeavy', 'Resample'), 'deltaSumResample': ('deltaSum', 'Resample'), 'groupArraySampleArgMin': ('groupArraySample', 'ArgMin'), 'cramersVBiasCorrectedArgMin': ('cramersVBiasCorrected', 'ArgMin'), 'argMaxArgMin': ('argMax', 'ArgMin'), 'quantileBFloat16ArgMin': ('quantileBFloat16', 'ArgMin'), 'quantileTimingWeightedArgMin': ('quantileTimingWeighted', 'ArgMin'), 'stochasticLogisticRegressionArgMin': ('stochasticLogisticRegression', 'ArgMin'), 'stddevPopArgMin': ('stddevPop', 'ArgMin'), 'maxIntersectionsArgMin': ('maxIntersections', 'ArgMin'), 'sumWithOverflowArgMin': ('sumWithOverflow', 'ArgMin'), 'sumMapArgMin': ('sumMap', 'ArgMin'), 'maxArgMin': ('max', 'ArgMin'), 'sumCountArgMin': ('sumCount', 'ArgMin'), 'minMapArgMin': ('minMap', 'ArgMin'), 'quantilesArgMin': ('quantiles', 'ArgMin'), 'meanZTestArgMin': ('meanZTest', 'ArgMin'), 'uniqCombinedArgMin': ('uniqCombined', 'ArgMin'), 'quantileExactArgMin': ('quantileExact', 'ArgMin'), 'avgArgMin': ('avg', 'ArgMin'), 'quantilesExactLowArgMin': ('quantilesExactLow', 'ArgMin'), 'quantileTimingArgMin': ('quantileTiming', 'ArgMin'), 'quantileGKArgMin': ('quantileGK', 'ArgMin'), 'countArgMin': ('count', 'ArgMin'), 'covarPopArgMin': ('covarPop', 'ArgMin'), 'groupBitmapArgMin': ('groupBitmap', 'ArgMin'), 'first_valueArgMin': ('first_value', 'ArgMin'), 'quantileExactWeightedArgMin': ('quantileExactWeighted', 'ArgMin'), 'intervalLengthSumArgMin': ('intervalLengthSum', 'ArgMin'), 
'uniqArgMin': ('uniq', 'ArgMin'), 'quantilesExactWeightedArgMin': ('quantilesExactWeighted', 'ArgMin'), 'quantilesTDigestWeightedArgMin': ('quantilesTDigestWeighted', 'ArgMin'), 'topKWeightedArgMin': ('topKWeighted', 'ArgMin'), 'quantilesTDigestArgMin': ('quantilesTDigest', 'ArgMin'), 'groupBitXorArgMin': ('groupBitXor', 'ArgMin'), 'quantilesGKArgMin': ('quantilesGK', 'ArgMin'), 'skewPopArgMin': ('skewPop', 'ArgMin'), 'mannWhitneyUTestArgMin': ('mannWhitneyUTest', 'ArgMin'), 'quantileTDigestWeightedArgMin': ('quantileTDigestWeighted', 'ArgMin'), 'quantilesDeterministicArgMin': ('quantilesDeterministic', 'ArgMin'), 'medianArgMin': ('median', 'ArgMin'), 'largestTriangleThreeBucketsArgMin': ('largestTriangleThreeBuckets', 'ArgMin'), 'rankCorrArgMin': ('rankCorr', 'ArgMin'), 'maxIntersectionsPositionArgMin': ('maxIntersectionsPosition', 'ArgMin'), 'varSampArgMin': ('varSamp', 'ArgMin'), 'last_valueArgMin': ('last_value', 'ArgMin'), 'welchTTestArgMin': ('welchTTest', 'ArgMin'), 'cramersVArgMin': ('cramersV', 'ArgMin'), 'quantileDeterministicArgMin': ('quantileDeterministic', 'ArgMin'), 'uniqCombined64ArgMin': ('uniqCombined64', 'ArgMin'), 'uniqThetaArgMin': ('uniqTheta', 'ArgMin'), 'groupArrayInsertAtArgMin': ('groupArrayInsertAt', 'ArgMin'), 'uniqExactArgMin': ('uniqExact', 'ArgMin'), 'covarSampArgMin': ('covarSamp', 'ArgMin'), 'quantilesInterpolatedWeightedArgMin': ('quantilesInterpolatedWeighted', 'ArgMin'), 'groupBitmapOrArgMin': ('groupBitmapOr', 'ArgMin'), 'groupBitAndArgMin': ('groupBitAnd', 'ArgMin'), 'sumKahanArgMin': ('sumKahan', 'ArgMin'), 'quantilesTimingWeightedArgMin': ('quantilesTimingWeighted', 'ArgMin'), 'simpleLinearRegressionArgMin': ('simpleLinearRegression', 'ArgMin'), 'groupUniqArrayArgMin': ('groupUniqArray', 'ArgMin'), 'kolmogorovSmirnovTestArgMin': ('kolmogorovSmirnovTest', 'ArgMin'), 'minArgMin': ('min', 'ArgMin'), 'argMinArgMin': ('argMin', 'ArgMin'), 'corrArgMin': ('corr', 'ArgMin'), 'anyArgMin': ('any', 'ArgMin'), 'quantileInterpolatedWeightedArgMin': ('quantileInterpolatedWeighted', 'ArgMin'), 'groupArrayArgMin': ('groupArray', 'ArgMin'), 'groupArrayMovingSumArgMin': ('groupArrayMovingSum', 'ArgMin'), 'groupArrayLastArgMin': ('groupArrayLast', 'ArgMin'), 'quantileBFloat16WeightedArgMin': ('quantileBFloat16Weighted', 'ArgMin'), 'skewSampArgMin': ('skewSamp', 'ArgMin'), 'stddevSampArgMin': ('stddevSamp', 'ArgMin'), 'kurtSampArgMin': ('kurtSamp', 'ArgMin'), 'uniqHLL12ArgMin': ('uniqHLL12', 'ArgMin'), 'boundingRatioArgMin': ('boundingRatio', 'ArgMin'), 'quantileTDigestArgMin': ('quantileTDigest', 'ArgMin'), 'theilsUArgMin': ('theilsU', 'ArgMin'), 'sumArgMin': ('sum', 'ArgMin'), 'quantileArgMin': ('quantile', 'ArgMin'), 'quantilesBFloat16WeightedArgMin': ('quantilesBFloat16Weighted', 'ArgMin'), 'quantilesExactArgMin': ('quantilesExact', 'ArgMin'), 'stochasticLinearRegressionArgMin': ('stochasticLinearRegression', 'ArgMin'), 'groupBitOrArgMin': ('groupBitOr', 'ArgMin'), 'varPopArgMin': ('varPop', 'ArgMin'), 'topKArgMin': ('topK', 'ArgMin'), 'kurtPopArgMin': ('kurtPop', 'ArgMin'), 'anyLastArgMin': ('anyLast', 'ArgMin'), 'groupArrayMovingAvgArgMin': ('groupArrayMovingAvg', 'ArgMin'), 'quantilesBFloat16ArgMin': ('quantilesBFloat16', 'ArgMin'), 'entropyArgMin': ('entropy', 'ArgMin'), 'quantileExactHighArgMin': ('quantileExactHigh', 'ArgMin'), 'sparkBarArgMin': ('sparkBar', 'ArgMin'), 'studentTTestArgMin': ('studentTTest', 'ArgMin'), 'deltaSumTimestampArgMin': ('deltaSumTimestamp', 'ArgMin'), 'quantilesExactHighArgMin': ('quantilesExactHigh', 'ArgMin'), 
'quantilesTimingArgMin': ('quantilesTiming', 'ArgMin'), 'groupBitmapXorArgMin': ('groupBitmapXor', 'ArgMin'), 'exponentialMovingAverageArgMin': ('exponentialMovingAverage', 'ArgMin'), 'groupBitmapAndArgMin': ('groupBitmapAnd', 'ArgMin'), 'contingencyArgMin': ('contingency', 'ArgMin'), 'categoricalInformationValueArgMin': ('categoricalInformationValue', 'ArgMin'), 'maxMapArgMin': ('maxMap', 'ArgMin'), 'quantileExactLowArgMin': ('quantileExactLow', 'ArgMin'), 'avgWeightedArgMin': ('avgWeighted', 'ArgMin'), 'anyHeavyArgMin': ('anyHeavy', 'ArgMin'), 'deltaSumArgMin': ('deltaSum', 'ArgMin'), 'groupArraySampleArgMax': ('groupArraySample', 'ArgMax'), 'cramersVBiasCorrectedArgMax': ('cramersVBiasCorrected', 'ArgMax'), 'argMaxArgMax': ('argMax', 'ArgMax'), 'quantileBFloat16ArgMax': ('quantileBFloat16', 'ArgMax'), 'quantileTimingWeightedArgMax': ('quantileTimingWeighted', 'ArgMax'), 'stochasticLogisticRegressionArgMax': ('stochasticLogisticRegression', 'ArgMax'), 'stddevPopArgMax': ('stddevPop', 'ArgMax'), 'maxIntersectionsArgMax': ('maxIntersections', 'ArgMax'), 'sumWithOverflowArgMax': ('sumWithOverflow', 'ArgMax'), 'sumMapArgMax': ('sumMap', 'ArgMax'), 'maxArgMax': ('max', 'ArgMax'), 'sumCountArgMax': ('sumCount', 'ArgMax'), 'minMapArgMax': ('minMap', 'ArgMax'), 'quantilesArgMax': ('quantiles', 'ArgMax'), 'meanZTestArgMax': ('meanZTest', 'ArgMax'), 'uniqCombinedArgMax': ('uniqCombined', 'ArgMax'), 'quantileExactArgMax': ('quantileExact', 'ArgMax'), 'avgArgMax': ('avg', 'ArgMax'), 'quantilesExactLowArgMax': ('quantilesExactLow', 'ArgMax'), 'quantileTimingArgMax': ('quantileTiming', 'ArgMax'), 'quantileGKArgMax': ('quantileGK', 'ArgMax'), 'countArgMax': ('count', 'ArgMax'), 'covarPopArgMax': ('covarPop', 'ArgMax'), 'groupBitmapArgMax': ('groupBitmap', 'ArgMax'), 'first_valueArgMax': ('first_value', 'ArgMax'), 'quantileExactWeightedArgMax': ('quantileExactWeighted', 'ArgMax'), 'intervalLengthSumArgMax': ('intervalLengthSum', 'ArgMax'), 'uniqArgMax': ('uniq', 'ArgMax'), 'quantilesExactWeightedArgMax': ('quantilesExactWeighted', 'ArgMax'), 'quantilesTDigestWeightedArgMax': ('quantilesTDigestWeighted', 'ArgMax'), 'topKWeightedArgMax': ('topKWeighted', 'ArgMax'), 'quantilesTDigestArgMax': ('quantilesTDigest', 'ArgMax'), 'groupBitXorArgMax': ('groupBitXor', 'ArgMax'), 'quantilesGKArgMax': ('quantilesGK', 'ArgMax'), 'skewPopArgMax': ('skewPop', 'ArgMax'), 'mannWhitneyUTestArgMax': ('mannWhitneyUTest', 'ArgMax'), 'quantileTDigestWeightedArgMax': ('quantileTDigestWeighted', 'ArgMax'), 'quantilesDeterministicArgMax': ('quantilesDeterministic', 'ArgMax'), 'medianArgMax': ('median', 'ArgMax'), 'largestTriangleThreeBucketsArgMax': ('largestTriangleThreeBuckets', 'ArgMax'), 'rankCorrArgMax': ('rankCorr', 'ArgMax'), 'maxIntersectionsPositionArgMax': ('maxIntersectionsPosition', 'ArgMax'), 'varSampArgMax': ('varSamp', 'ArgMax'), 'last_valueArgMax': ('last_value', 'ArgMax'), 'welchTTestArgMax': ('welchTTest', 'ArgMax'), 'cramersVArgMax': ('cramersV', 'ArgMax'), 'quantileDeterministicArgMax': ('quantileDeterministic', 'ArgMax'), 'uniqCombined64ArgMax': ('uniqCombined64', 'ArgMax'), 'uniqThetaArgMax': ('uniqTheta', 'ArgMax'), 'groupArrayInsertAtArgMax': ('groupArrayInsertAt', 'ArgMax'), 'uniqExactArgMax': ('uniqExact', 'ArgMax'), 'covarSampArgMax': ('covarSamp', 'ArgMax'), 'quantilesInterpolatedWeightedArgMax': ('quantilesInterpolatedWeighted', 'ArgMax'), 'groupBitmapOrArgMax': ('groupBitmapOr', 'ArgMax'), 'groupBitAndArgMax': ('groupBitAnd', 'ArgMax'), 'sumKahanArgMax': ('sumKahan', 'ArgMax'), 
'quantilesTimingWeightedArgMax': ('quantilesTimingWeighted', 'ArgMax'), 'simpleLinearRegressionArgMax': ('simpleLinearRegression', 'ArgMax'), 'groupUniqArrayArgMax': ('groupUniqArray', 'ArgMax'), 'kolmogorovSmirnovTestArgMax': ('kolmogorovSmirnovTest', 'ArgMax'), 'minArgMax': ('min', 'ArgMax'), 'argMinArgMax': ('argMin', 'ArgMax'), 'corrArgMax': ('corr', 'ArgMax'), 'anyArgMax': ('any', 'ArgMax'), 'quantileInterpolatedWeightedArgMax': ('quantileInterpolatedWeighted', 'ArgMax'), 'groupArrayArgMax': ('groupArray', 'ArgMax'), 'groupArrayMovingSumArgMax': ('groupArrayMovingSum', 'ArgMax'), 'groupArrayLastArgMax': ('groupArrayLast', 'ArgMax'), 'quantileBFloat16WeightedArgMax': ('quantileBFloat16Weighted', 'ArgMax'), 'skewSampArgMax': ('skewSamp', 'ArgMax'), 'stddevSampArgMax': ('stddevSamp', 'ArgMax'), 'kurtSampArgMax': ('kurtSamp', 'ArgMax'), 'uniqHLL12ArgMax': ('uniqHLL12', 'ArgMax'), 'boundingRatioArgMax': ('boundingRatio', 'ArgMax'), 'quantileTDigestArgMax': ('quantileTDigest', 'ArgMax'), 'theilsUArgMax': ('theilsU', 'ArgMax'), 'sumArgMax': ('sum', 'ArgMax'), 'quantileArgMax': ('quantile', 'ArgMax'), 'quantilesBFloat16WeightedArgMax': ('quantilesBFloat16Weighted', 'ArgMax'), 'quantilesExactArgMax': ('quantilesExact', 'ArgMax'), 'stochasticLinearRegressionArgMax': ('stochasticLinearRegression', 'ArgMax'), 'groupBitOrArgMax': ('groupBitOr', 'ArgMax'), 'varPopArgMax': ('varPop', 'ArgMax'), 'topKArgMax': ('topK', 'ArgMax'), 'kurtPopArgMax': ('kurtPop', 'ArgMax'), 'anyLastArgMax': ('anyLast', 'ArgMax'), 'groupArrayMovingAvgArgMax': ('groupArrayMovingAvg', 'ArgMax'), 'quantilesBFloat16ArgMax': ('quantilesBFloat16', 'ArgMax'), 'entropyArgMax': ('entropy', 'ArgMax'), 'quantileExactHighArgMax': ('quantileExactHigh', 'ArgMax'), 'sparkBarArgMax': ('sparkBar', 'ArgMax'), 'studentTTestArgMax': ('studentTTest', 'ArgMax'), 'deltaSumTimestampArgMax': ('deltaSumTimestamp', 'ArgMax'), 'quantilesExactHighArgMax': ('quantilesExactHigh', 'ArgMax'), 'quantilesTimingArgMax': ('quantilesTiming', 'ArgMax'), 'groupBitmapXorArgMax': ('groupBitmapXor', 'ArgMax'), 'exponentialMovingAverageArgMax': ('exponentialMovingAverage', 'ArgMax'), 'groupBitmapAndArgMax': ('groupBitmapAnd', 'ArgMax'), 'contingencyArgMax': ('contingency', 'ArgMax'), 'categoricalInformationValueArgMax': ('categoricalInformationValue', 'ArgMax'), 'maxMapArgMax': ('maxMap', 'ArgMax'), 'quantileExactLowArgMax': ('quantileExactLow', 'ArgMax'), 'avgWeightedArgMax': ('avgWeighted', 'ArgMax'), 'anyHeavyArgMax': ('anyHeavy', 'ArgMax'), 'deltaSumArgMax': ('deltaSum', 'ArgMax'), 'groupArraySample': ('groupArraySample', ''), 'cramersVBiasCorrected': ('cramersVBiasCorrected', ''), 'argMax': ('argMax', ''), 'quantileBFloat16': ('quantileBFloat16', ''), 'quantileTimingWeighted': ('quantileTimingWeighted', ''), 'stochasticLogisticRegression': ('stochasticLogisticRegression', ''), 'stddevPop': ('stddevPop', ''), 'maxIntersections': ('maxIntersections', ''), 'sumWithOverflow': ('sumWithOverflow', ''), 'max': ('max', ''), 'sumCount': ('sumCount', ''), 'quantiles': ('quantiles', ''), 'meanZTest': ('meanZTest', ''), 'uniqCombined': ('uniqCombined', ''), 'quantileExact': ('quantileExact', ''), 'avg': ('avg', ''), 'quantilesExactLow': ('quantilesExactLow', ''), 'quantileTiming': ('quantileTiming', ''), 'quantileGK': ('quantileGK', ''), 'count': ('count', ''), 'covarPop': ('covarPop', ''), 'groupBitmap': ('groupBitmap', ''), 'first_value': ('first_value', ''), 'quantileExactWeighted': ('quantileExactWeighted', ''), 'intervalLengthSum': ('intervalLengthSum', ''), 'uniq': 
('uniq', ''), 'quantilesExactWeighted': ('quantilesExactWeighted', ''), 'quantilesTDigestWeighted': ('quantilesTDigestWeighted', ''), 'topKWeighted': ('topKWeighted', ''), 'quantilesTDigest': ('quantilesTDigest', ''), 'groupBitXor': ('groupBitXor', ''), 'quantilesGK': ('quantilesGK', ''), 'skewPop': ('skewPop', ''), 'mannWhitneyUTest': ('mannWhitneyUTest', ''), 'quantileTDigestWeighted': ('quantileTDigestWeighted', ''), 'quantilesDeterministic': ('quantilesDeterministic', ''), 'median': ('median', ''), 'largestTriangleThreeBuckets': ('largestTriangleThreeBuckets', ''), 'rankCorr': ('rankCorr', ''), 'maxIntersectionsPosition': ('maxIntersectionsPosition', ''), 'varSamp': ('varSamp', ''), 'last_value': ('last_value', ''), 'welchTTest': ('welchTTest', ''), 'cramersV': ('cramersV', ''), 'quantileDeterministic': ('quantileDeterministic', ''), 'uniqCombined64': ('uniqCombined64', ''), 'uniqTheta': ('uniqTheta', ''), 'groupArrayInsertAt': ('groupArrayInsertAt', ''), 'uniqExact': ('uniqExact', ''), 'covarSamp': ('covarSamp', ''), 'quantilesInterpolatedWeighted': ('quantilesInterpolatedWeighted', ''), 'groupBitmapOr': ('groupBitmapOr', ''), 'groupBitAnd': ('groupBitAnd', ''), 'sumKahan': ('sumKahan', ''), 'quantilesTimingWeighted': ('quantilesTimingWeighted', ''), 'simpleLinearRegression': ('simpleLinearRegression', ''), 'groupUniqArray': ('groupUniqArray', ''), 'kolmogorovSmirnovTest': ('kolmogorovSmirnovTest', ''), 'min': ('min', ''), 'argMin': ('argMin', ''), 'corr': ('corr', ''), 'any': ('any', ''), 'quantileInterpolatedWeighted': ('quantileInterpolatedWeighted', ''), 'groupArray': ('groupArray', ''), 'groupArrayMovingSum': ('groupArrayMovingSum', ''), 'groupArrayLast': ('groupArrayLast', ''), 'quantileBFloat16Weighted': ('quantileBFloat16Weighted', ''), 'skewSamp': ('skewSamp', ''), 'stddevSamp': ('stddevSamp', ''), 'kurtSamp': ('kurtSamp', ''), 'uniqHLL12': ('uniqHLL12', ''), 'boundingRatio': ('boundingRatio', ''), 'quantileTDigest': ('quantileTDigest', ''), 'theilsU': ('theilsU', ''), 'sum': ('sum', ''), 'quantile': ('quantile', ''), 'quantilesBFloat16Weighted': ('quantilesBFloat16Weighted', ''), 'quantilesExact': ('quantilesExact', ''), 'stochasticLinearRegression': ('stochasticLinearRegression', ''), 'groupBitOr': ('groupBitOr', ''), 'varPop': ('varPop', ''), 'topK': ('topK', ''), 'kurtPop': ('kurtPop', ''), 'anyLast': ('anyLast', ''), 'groupArrayMovingAvg': ('groupArrayMovingAvg', ''), 'quantilesBFloat16': ('quantilesBFloat16', ''), 'entropy': ('entropy', ''), 'quantileExactHigh': ('quantileExactHigh', ''), 'sparkBar': ('sparkBar', ''), 'studentTTest': ('studentTTest', ''), 'deltaSumTimestamp': ('deltaSumTimestamp', ''), 'quantilesExactHigh': ('quantilesExactHigh', ''), 'quantilesTiming': ('quantilesTiming', ''), 'groupBitmapXor': ('groupBitmapXor', ''), 'exponentialMovingAverage': ('exponentialMovingAverage', ''), 'groupBitmapAnd': ('groupBitmapAnd', ''), 'contingency': ('contingency', ''), 'categoricalInformationValue': ('categoricalInformationValue', ''), 'quantileExactLow': ('quantileExactLow', ''), 'avgWeighted': ('avgWeighted', ''), 'anyHeavy': ('anyHeavy', ''), 'deltaSum': ('deltaSum', '')}
+ {'uniqCombinedIf': ('uniqCombined', 'If'), 'contingencyIf': ('contingency', 'If'), 'stddevPopIf': ('stddevPop', 'If'), 'uniqThetaIf': ('uniqTheta', 'If'), 'quantileExactWeightedIf': ('quantileExactWeighted', 'If'), 'simpleLinearRegressionIf': ('simpleLinearRegression', 'If'), 'quantilesExactIf': ('quantilesExact', 'If'), 'theilsUIf': ('theilsU', 'If'), 'quantileIf': ('quantile', 'If'),
'quantilesDeterministicIf': ('quantilesDeterministic', 'If'), 'groupBitmapXorIf': ('groupBitmapXor', 'If'), 'quantileExactLowIf': ('quantileExactLow', 'If'), 'maxMapIf': ('maxMap', 'If'), 'deltaSumIf': ('deltaSum', 'If'), 'groupBitmapOrIf': ('groupBitmapOr', 'If'), 'stochasticLogisticRegressionIf': ('stochasticLogisticRegression', 'If'), 'intervalLengthSumIf': ('intervalLengthSum', 'If'), 'groupArraySampleIf': ('groupArraySample', 'If'), 'covarSampIf': ('covarSamp', 'If'), 'groupBitXorIf': ('groupBitXor', 'If'), 'quantilesInterpolatedWeightedIf': ('quantilesInterpolatedWeighted', 'If'), 'first_valueIf': ('first_value', 'If'), 'quantilesGKIf': ('quantilesGK', 'If'), 'deltaSumTimestampIf': ('deltaSumTimestamp', 'If'), 'maxIntersectionsPositionIf': ('maxIntersectionsPosition', 'If'), 'groupArrayMovingSumIf': ('groupArrayMovingSum', 'If'), 'varSampIf': ('varSamp', 'If'), 'groupBitOrIf': ('groupBitOr', 'If'), 'argMinIf': ('argMin', 'If'), 'groupBitmapAndIf': ('groupBitmapAnd', 'If'), 'rankCorrIf': ('rankCorr', 'If'), 'welchTTestIf': ('welchTTest', 'If'), 'skewPopIf': ('skewPop', 'If'), 'avgIf': ('avg', 'If'), 'uniqHLL12If': ('uniqHLL12', 'If'), 'quantileGKIf': ('quantileGK', 'If'), 'mannWhitneyUTestIf': ('mannWhitneyUTest', 'If'), 'largestTriangleThreeBucketsIf': ('largestTriangleThreeBuckets', 'If'), 'quantileBFloat16If': ('quantileBFloat16', 'If'), 'quantileTDigestIf': ('quantileTDigest', 'If'), 'minMapIf': ('minMap', 'If'), 'quantilesTDigestWeightedIf': ('quantilesTDigestWeighted', 'If'), 'uniqExactIf': ('uniqExact', 'If'), 'quantilesTimingWeightedIf': ('quantilesTimingWeighted', 'If'), 'topKIf': ('topK', 'If'), 'quantilesExactHighIf': ('quantilesExactHigh', 'If'), 'topKWeightedIf': ('topKWeighted', 'If'), 'categoricalInformationValueIf': ('categoricalInformationValue', 'If'), 'entropyIf': ('entropy', 'If'), 'uniqCombined64If': ('uniqCombined64', 'If'), 'quantileExactHighIf': ('quantileExactHigh', 'If'), 'kolmogorovSmirnovTestIf': ('kolmogorovSmirnovTest', 'If'), 'minIf': ('min', 'If'), 'groupArrayIf': ('groupArray', 'If'), 'quantilesBFloat16If': ('quantilesBFloat16', 'If'), 'boundingRatioIf': ('boundingRatio', 'If'), 'corrIf': ('corr', 'If'), 'cramersVIf': ('cramersV', 'If'), 'cramersVBiasCorrectedIf': ('cramersVBiasCorrected', 'If'), 'quantilesTDigestIf': ('quantilesTDigest', 'If'), 'stochasticLinearRegressionIf': ('stochasticLinearRegression', 'If'), 'groupUniqArrayIf': ('groupUniqArray', 'If'), 'sparkBarIf': ('sparkBar', 'If'), 'groupArrayLastIf': ('groupArrayLast', 'If'), 'groupArrayMovingAvgIf': ('groupArrayMovingAvg', 'If'), 'meanZTestIf': ('meanZTest', 'If'), 'sumKahanIf': ('sumKahan', 'If'), 'kurtSampIf': ('kurtSamp', 'If'), 'uniqIf': ('uniq', 'If'), 'groupBitmapIf': ('groupBitmap', 'If'), 'sumMapIf': ('sumMap', 'If'), 'covarPopIf': ('covarPop', 'If'), 'quantilesIf': ('quantiles', 'If'), 'quantileTDigestWeightedIf': ('quantileTDigestWeighted', 'If'), 'argMaxIf': ('argMax', 'If'), 'studentTTestIf': ('studentTTest', 'If'), 'kurtPopIf': ('kurtPop', 'If'), 'stddevSampIf': ('stddevSamp', 'If'), 'skewSampIf': ('skewSamp', 'If'), 'last_valueIf': ('last_value', 'If'), 'quantileBFloat16WeightedIf': ('quantileBFloat16Weighted', 'If'), 'sumCountIf': ('sumCount', 'If'), 'quantilesTimingIf': ('quantilesTiming', 'If'), 'sumIf': ('sum', 'If'), 'medianIf': ('median', 'If'), 'anyIf': ('any', 'If'), 'quantileDeterministicIf': ('quantileDeterministic', 'If'), 'groupArrayInsertAtIf': ('groupArrayInsertAt', 'If'), 'maxIntersectionsIf': ('maxIntersections', 'If'), 'quantileExactIf': ('quantileExact', 
'If'), 'quantileTimingIf': ('quantileTiming', 'If'), 'countIf': ('count', 'If'), 'quantileTimingWeightedIf': ('quantileTimingWeighted', 'If'), 'anyHeavyIf': ('anyHeavy', 'If'), 'varPopIf': ('varPop', 'If'), 'quantileInterpolatedWeightedIf': ('quantileInterpolatedWeighted', 'If'), 'maxIf': ('max', 'If'), 'avgWeightedIf': ('avgWeighted', 'If'), 'groupBitAndIf': ('groupBitAnd', 'If'), 'quantilesExactWeightedIf': ('quantilesExactWeighted', 'If'), 'sumWithOverflowIf': ('sumWithOverflow', 'If'), 'anyLastIf': ('anyLast', 'If'), 'quantilesBFloat16WeightedIf': ('quantilesBFloat16Weighted', 'If'), 'quantilesExactLowIf': ('quantilesExactLow', 'If'), 'exponentialMovingAverageIf': ('exponentialMovingAverage', 'If'), 'uniqCombinedArray': ('uniqCombined', 'Array'), 'contingencyArray': ('contingency', 'Array'), 'stddevPopArray': ('stddevPop', 'Array'), 'uniqThetaArray': ('uniqTheta', 'Array'), 'quantileExactWeightedArray': ('quantileExactWeighted', 'Array'), 'simpleLinearRegressionArray': ('simpleLinearRegression', 'Array'), 'quantilesExactArray': ('quantilesExact', 'Array'), 'theilsUArray': ('theilsU', 'Array'), 'quantileArray': ('quantile', 'Array'), 'quantilesDeterministicArray': ('quantilesDeterministic', 'Array'), 'groupBitmapXorArray': ('groupBitmapXor', 'Array'), 'quantileExactLowArray': ('quantileExactLow', 'Array'), 'maxMapArray': ('maxMap', 'Array'), 'deltaSumArray': ('deltaSum', 'Array'), 'groupBitmapOrArray': ('groupBitmapOr', 'Array'), 'stochasticLogisticRegressionArray': ('stochasticLogisticRegression', 'Array'), 'intervalLengthSumArray': ('intervalLengthSum', 'Array'), 'groupArraySampleArray': ('groupArraySample', 'Array'), 'covarSampArray': ('covarSamp', 'Array'), 'groupBitXorArray': ('groupBitXor', 'Array'), 'quantilesInterpolatedWeightedArray': ('quantilesInterpolatedWeighted', 'Array'), 'first_valueArray': ('first_value', 'Array'), 'quantilesGKArray': ('quantilesGK', 'Array'), 'deltaSumTimestampArray': ('deltaSumTimestamp', 'Array'), 'maxIntersectionsPositionArray': ('maxIntersectionsPosition', 'Array'), 'groupArrayMovingSumArray': ('groupArrayMovingSum', 'Array'), 'varSampArray': ('varSamp', 'Array'), 'groupBitOrArray': ('groupBitOr', 'Array'), 'argMinArray': ('argMin', 'Array'), 'groupBitmapAndArray': ('groupBitmapAnd', 'Array'), 'rankCorrArray': ('rankCorr', 'Array'), 'welchTTestArray': ('welchTTest', 'Array'), 'skewPopArray': ('skewPop', 'Array'), 'avgArray': ('avg', 'Array'), 'uniqHLL12Array': ('uniqHLL12', 'Array'), 'quantileGKArray': ('quantileGK', 'Array'), 'mannWhitneyUTestArray': ('mannWhitneyUTest', 'Array'), 'largestTriangleThreeBucketsArray': ('largestTriangleThreeBuckets', 'Array'), 'quantileBFloat16Array': ('quantileBFloat16', 'Array'), 'quantileTDigestArray': ('quantileTDigest', 'Array'), 'minMapArray': ('minMap', 'Array'), 'quantilesTDigestWeightedArray': ('quantilesTDigestWeighted', 'Array'), 'uniqExactArray': ('uniqExact', 'Array'), 'quantilesTimingWeightedArray': ('quantilesTimingWeighted', 'Array'), 'topKArray': ('topK', 'Array'), 'quantilesExactHighArray': ('quantilesExactHigh', 'Array'), 'topKWeightedArray': ('topKWeighted', 'Array'), 'categoricalInformationValueArray': ('categoricalInformationValue', 'Array'), 'entropyArray': ('entropy', 'Array'), 'uniqCombined64Array': ('uniqCombined64', 'Array'), 'quantileExactHighArray': ('quantileExactHigh', 'Array'), 'kolmogorovSmirnovTestArray': ('kolmogorovSmirnovTest', 'Array'), 'minArray': ('min', 'Array'), 'groupArrayArray': ('groupArray', 'Array'), 'quantilesBFloat16Array': ('quantilesBFloat16', 'Array'), 
'boundingRatioArray': ('boundingRatio', 'Array'), 'corrArray': ('corr', 'Array'), 'cramersVArray': ('cramersV', 'Array'), 'cramersVBiasCorrectedArray': ('cramersVBiasCorrected', 'Array'), 'quantilesTDigestArray': ('quantilesTDigest', 'Array'), 'stochasticLinearRegressionArray': ('stochasticLinearRegression', 'Array'), 'groupUniqArrayArray': ('groupUniqArray', 'Array'), 'sparkBarArray': ('sparkBar', 'Array'), 'groupArrayLastArray': ('groupArrayLast', 'Array'), 'groupArrayMovingAvgArray': ('groupArrayMovingAvg', 'Array'), 'meanZTestArray': ('meanZTest', 'Array'), 'sumKahanArray': ('sumKahan', 'Array'), 'kurtSampArray': ('kurtSamp', 'Array'), 'uniqArray': ('uniq', 'Array'), 'groupBitmapArray': ('groupBitmap', 'Array'), 'sumMapArray': ('sumMap', 'Array'), 'covarPopArray': ('covarPop', 'Array'), 'quantilesArray': ('quantiles', 'Array'), 'quantileTDigestWeightedArray': ('quantileTDigestWeighted', 'Array'), 'argMaxArray': ('argMax', 'Array'), 'studentTTestArray': ('studentTTest', 'Array'), 'kurtPopArray': ('kurtPop', 'Array'), 'stddevSampArray': ('stddevSamp', 'Array'), 'skewSampArray': ('skewSamp', 'Array'), 'last_valueArray': ('last_value', 'Array'), 'quantileBFloat16WeightedArray': ('quantileBFloat16Weighted', 'Array'), 'sumCountArray': ('sumCount', 'Array'), 'quantilesTimingArray': ('quantilesTiming', 'Array'), 'sumArray': ('sum', 'Array'), 'medianArray': ('median', 'Array'), 'anyArray': ('any', 'Array'), 'quantileDeterministicArray': ('quantileDeterministic', 'Array'), 'groupArrayInsertAtArray': ('groupArrayInsertAt', 'Array'), 'maxIntersectionsArray': ('maxIntersections', 'Array'), 'quantileExactArray': ('quantileExact', 'Array'), 'quantileTimingArray': ('quantileTiming', 'Array'), 'countArray': ('count', 'Array'), 'quantileTimingWeightedArray': ('quantileTimingWeighted', 'Array'), 'anyHeavyArray': ('anyHeavy', 'Array'), 'varPopArray': ('varPop', 'Array'), 'quantileInterpolatedWeightedArray': ('quantileInterpolatedWeighted', 'Array'), 'maxArray': ('max', 'Array'), 'avgWeightedArray': ('avgWeighted', 'Array'), 'groupBitAndArray': ('groupBitAnd', 'Array'), 'quantilesExactWeightedArray': ('quantilesExactWeighted', 'Array'), 'sumWithOverflowArray': ('sumWithOverflow', 'Array'), 'anyLastArray': ('anyLast', 'Array'), 'quantilesBFloat16WeightedArray': ('quantilesBFloat16Weighted', 'Array'), 'quantilesExactLowArray': ('quantilesExactLow', 'Array'), 'exponentialMovingAverageArray': ('exponentialMovingAverage', 'Array'), 'uniqCombinedArrayIf': ('uniqCombined', 'ArrayIf'), 'contingencyArrayIf': ('contingency', 'ArrayIf'), 'stddevPopArrayIf': ('stddevPop', 'ArrayIf'), 'uniqThetaArrayIf': ('uniqTheta', 'ArrayIf'), 'quantileExactWeightedArrayIf': ('quantileExactWeighted', 'ArrayIf'), 'simpleLinearRegressionArrayIf': ('simpleLinearRegression', 'ArrayIf'), 'quantilesExactArrayIf': ('quantilesExact', 'ArrayIf'), 'theilsUArrayIf': ('theilsU', 'ArrayIf'), 'quantileArrayIf': ('quantile', 'ArrayIf'), 'quantilesDeterministicArrayIf': ('quantilesDeterministic', 'ArrayIf'), 'groupBitmapXorArrayIf': ('groupBitmapXor', 'ArrayIf'), 'quantileExactLowArrayIf': ('quantileExactLow', 'ArrayIf'), 'maxMapArrayIf': ('maxMap', 'ArrayIf'), 'deltaSumArrayIf': ('deltaSum', 'ArrayIf'), 'groupBitmapOrArrayIf': ('groupBitmapOr', 'ArrayIf'), 'stochasticLogisticRegressionArrayIf': ('stochasticLogisticRegression', 'ArrayIf'), 'intervalLengthSumArrayIf': ('intervalLengthSum', 'ArrayIf'), 'groupArraySampleArrayIf': ('groupArraySample', 'ArrayIf'), 'covarSampArrayIf': ('covarSamp', 'ArrayIf'), 'groupBitXorArrayIf': ('groupBitXor', 
'ArrayIf'), 'quantilesInterpolatedWeightedArrayIf': ('quantilesInterpolatedWeighted', 'ArrayIf'), 'first_valueArrayIf': ('first_value', 'ArrayIf'), 'quantilesGKArrayIf': ('quantilesGK', 'ArrayIf'), 'deltaSumTimestampArrayIf': ('deltaSumTimestamp', 'ArrayIf'), 'maxIntersectionsPositionArrayIf': ('maxIntersectionsPosition', 'ArrayIf'), 'groupArrayMovingSumArrayIf': ('groupArrayMovingSum', 'ArrayIf'), 'varSampArrayIf': ('varSamp', 'ArrayIf'), 'groupBitOrArrayIf': ('groupBitOr', 'ArrayIf'), 'argMinArrayIf': ('argMin', 'ArrayIf'), 'groupBitmapAndArrayIf': ('groupBitmapAnd', 'ArrayIf'), 'rankCorrArrayIf': ('rankCorr', 'ArrayIf'), 'welchTTestArrayIf': ('welchTTest', 'ArrayIf'), 'skewPopArrayIf': ('skewPop', 'ArrayIf'), 'avgArrayIf': ('avg', 'ArrayIf'), 'uniqHLL12ArrayIf': ('uniqHLL12', 'ArrayIf'), 'quantileGKArrayIf': ('quantileGK', 'ArrayIf'), 'mannWhitneyUTestArrayIf': ('mannWhitneyUTest', 'ArrayIf'), 'largestTriangleThreeBucketsArrayIf': ('largestTriangleThreeBuckets', 'ArrayIf'), 'quantileBFloat16ArrayIf': ('quantileBFloat16', 'ArrayIf'), 'quantileTDigestArrayIf': ('quantileTDigest', 'ArrayIf'), 'minMapArrayIf': ('minMap', 'ArrayIf'), 'quantilesTDigestWeightedArrayIf': ('quantilesTDigestWeighted', 'ArrayIf'), 'uniqExactArrayIf': ('uniqExact', 'ArrayIf'), 'quantilesTimingWeightedArrayIf': ('quantilesTimingWeighted', 'ArrayIf'), 'topKArrayIf': ('topK', 'ArrayIf'), 'quantilesExactHighArrayIf': ('quantilesExactHigh', 'ArrayIf'), 'topKWeightedArrayIf': ('topKWeighted', 'ArrayIf'), 'categoricalInformationValueArrayIf': ('categoricalInformationValue', 'ArrayIf'), 'entropyArrayIf': ('entropy', 'ArrayIf'), 'uniqCombined64ArrayIf': ('uniqCombined64', 'ArrayIf'), 'quantileExactHighArrayIf': ('quantileExactHigh', 'ArrayIf'), 'kolmogorovSmirnovTestArrayIf': ('kolmogorovSmirnovTest', 'ArrayIf'), 'minArrayIf': ('min', 'ArrayIf'), 'groupArrayArrayIf': ('groupArray', 'ArrayIf'), 'quantilesBFloat16ArrayIf': ('quantilesBFloat16', 'ArrayIf'), 'boundingRatioArrayIf': ('boundingRatio', 'ArrayIf'), 'corrArrayIf': ('corr', 'ArrayIf'), 'cramersVArrayIf': ('cramersV', 'ArrayIf'), 'cramersVBiasCorrectedArrayIf': ('cramersVBiasCorrected', 'ArrayIf'), 'quantilesTDigestArrayIf': ('quantilesTDigest', 'ArrayIf'), 'stochasticLinearRegressionArrayIf': ('stochasticLinearRegression', 'ArrayIf'), 'groupUniqArrayArrayIf': ('groupUniqArray', 'ArrayIf'), 'sparkBarArrayIf': ('sparkBar', 'ArrayIf'), 'groupArrayLastArrayIf': ('groupArrayLast', 'ArrayIf'), 'groupArrayMovingAvgArrayIf': ('groupArrayMovingAvg', 'ArrayIf'), 'meanZTestArrayIf': ('meanZTest', 'ArrayIf'), 'sumKahanArrayIf': ('sumKahan', 'ArrayIf'), 'kurtSampArrayIf': ('kurtSamp', 'ArrayIf'), 'uniqArrayIf': ('uniq', 'ArrayIf'), 'groupBitmapArrayIf': ('groupBitmap', 'ArrayIf'), 'sumMapArrayIf': ('sumMap', 'ArrayIf'), 'covarPopArrayIf': ('covarPop', 'ArrayIf'), 'quantilesArrayIf': ('quantiles', 'ArrayIf'), 'quantileTDigestWeightedArrayIf': ('quantileTDigestWeighted', 'ArrayIf'), 'argMaxArrayIf': ('argMax', 'ArrayIf'), 'studentTTestArrayIf': ('studentTTest', 'ArrayIf'), 'kurtPopArrayIf': ('kurtPop', 'ArrayIf'), 'stddevSampArrayIf': ('stddevSamp', 'ArrayIf'), 'skewSampArrayIf': ('skewSamp', 'ArrayIf'), 'last_valueArrayIf': ('last_value', 'ArrayIf'), 'quantileBFloat16WeightedArrayIf': ('quantileBFloat16Weighted', 'ArrayIf'), 'sumCountArrayIf': ('sumCount', 'ArrayIf'), 'quantilesTimingArrayIf': ('quantilesTiming', 'ArrayIf'), 'sumArrayIf': ('sum', 'ArrayIf'), 'medianArrayIf': ('median', 'ArrayIf'), 'anyArrayIf': ('any', 'ArrayIf'), 'quantileDeterministicArrayIf': 
('quantileDeterministic', 'ArrayIf'), 'groupArrayInsertAtArrayIf': ('groupArrayInsertAt', 'ArrayIf'), 'maxIntersectionsArrayIf': ('maxIntersections', 'ArrayIf'), 'quantileExactArrayIf': ('quantileExact', 'ArrayIf'), 'quantileTimingArrayIf': ('quantileTiming', 'ArrayIf'), 'countArrayIf': ('count', 'ArrayIf'), 'quantileTimingWeightedArrayIf': ('quantileTimingWeighted', 'ArrayIf'), 'anyHeavyArrayIf': ('anyHeavy', 'ArrayIf'), 'varPopArrayIf': ('varPop', 'ArrayIf'), 'quantileInterpolatedWeightedArrayIf': ('quantileInterpolatedWeighted', 'ArrayIf'), 'maxArrayIf': ('max', 'ArrayIf'), 'avgWeightedArrayIf': ('avgWeighted', 'ArrayIf'), 'groupBitAndArrayIf': ('groupBitAnd', 'ArrayIf'), 'quantilesExactWeightedArrayIf': ('quantilesExactWeighted', 'ArrayIf'), 'sumWithOverflowArrayIf': ('sumWithOverflow', 'ArrayIf'), 'anyLastArrayIf': ('anyLast', 'ArrayIf'), 'quantilesBFloat16WeightedArrayIf': ('quantilesBFloat16Weighted', 'ArrayIf'), 'quantilesExactLowArrayIf': ('quantilesExactLow', 'ArrayIf'), 'exponentialMovingAverageArrayIf': ('exponentialMovingAverage', 'ArrayIf'), 'uniqCombinedMap': ('uniqCombined', 'Map'), 'contingencyMap': ('contingency', 'Map'), 'stddevPopMap': ('stddevPop', 'Map'), 'uniqThetaMap': ('uniqTheta', 'Map'), 'quantileExactWeightedMap': ('quantileExactWeighted', 'Map'), 'simpleLinearRegressionMap': ('simpleLinearRegression', 'Map'), 'quantilesExactMap': ('quantilesExact', 'Map'), 'theilsUMap': ('theilsU', 'Map'), 'quantileMap': ('quantile', 'Map'), 'quantilesDeterministicMap': ('quantilesDeterministic', 'Map'), 'groupBitmapXorMap': ('groupBitmapXor', 'Map'), 'quantileExactLowMap': ('quantileExactLow', 'Map'), 'maxMapMap': ('maxMap', 'Map'), 'deltaSumMap': ('deltaSum', 'Map'), 'groupBitmapOrMap': ('groupBitmapOr', 'Map'), 'stochasticLogisticRegressionMap': ('stochasticLogisticRegression', 'Map'), 'intervalLengthSumMap': ('intervalLengthSum', 'Map'), 'groupArraySampleMap': ('groupArraySample', 'Map'), 'covarSampMap': ('covarSamp', 'Map'), 'groupBitXorMap': ('groupBitXor', 'Map'), 'quantilesInterpolatedWeightedMap': ('quantilesInterpolatedWeighted', 'Map'), 'first_valueMap': ('first_value', 'Map'), 'quantilesGKMap': ('quantilesGK', 'Map'), 'deltaSumTimestampMap': ('deltaSumTimestamp', 'Map'), 'maxIntersectionsPositionMap': ('maxIntersectionsPosition', 'Map'), 'groupArrayMovingSumMap': ('groupArrayMovingSum', 'Map'), 'varSampMap': ('varSamp', 'Map'), 'groupBitOrMap': ('groupBitOr', 'Map'), 'argMinMap': ('argMin', 'Map'), 'groupBitmapAndMap': ('groupBitmapAnd', 'Map'), 'rankCorrMap': ('rankCorr', 'Map'), 'welchTTestMap': ('welchTTest', 'Map'), 'skewPopMap': ('skewPop', 'Map'), 'avgMap': ('avg', 'Map'), 'uniqHLL12Map': ('uniqHLL12', 'Map'), 'quantileGKMap': ('quantileGK', 'Map'), 'mannWhitneyUTestMap': ('mannWhitneyUTest', 'Map'), 'largestTriangleThreeBucketsMap': ('largestTriangleThreeBuckets', 'Map'), 'quantileBFloat16Map': ('quantileBFloat16', 'Map'), 'quantileTDigestMap': ('quantileTDigest', 'Map'), 'minMapMap': ('minMap', 'Map'), 'quantilesTDigestWeightedMap': ('quantilesTDigestWeighted', 'Map'), 'uniqExactMap': ('uniqExact', 'Map'), 'quantilesTimingWeightedMap': ('quantilesTimingWeighted', 'Map'), 'topKMap': ('topK', 'Map'), 'quantilesExactHighMap': ('quantilesExactHigh', 'Map'), 'topKWeightedMap': ('topKWeighted', 'Map'), 'categoricalInformationValueMap': ('categoricalInformationValue', 'Map'), 'entropyMap': ('entropy', 'Map'), 'uniqCombined64Map': ('uniqCombined64', 'Map'), 'quantileExactHighMap': ('quantileExactHigh', 'Map'), 'kolmogorovSmirnovTestMap': ('kolmogorovSmirnovTest', 
'Map'), 'minMap': ('minMap', ''), 'groupArrayMap': ('groupArray', 'Map'), 'quantilesBFloat16Map': ('quantilesBFloat16', 'Map'), 'boundingRatioMap': ('boundingRatio', 'Map'), 'corrMap': ('corr', 'Map'), 'cramersVMap': ('cramersV', 'Map'), 'cramersVBiasCorrectedMap': ('cramersVBiasCorrected', 'Map'), 'quantilesTDigestMap': ('quantilesTDigest', 'Map'), 'stochasticLinearRegressionMap': ('stochasticLinearRegression', 'Map'), 'groupUniqArrayMap': ('groupUniqArray', 'Map'), 'sparkBarMap': ('sparkBar', 'Map'), 'groupArrayLastMap': ('groupArrayLast', 'Map'), 'groupArrayMovingAvgMap': ('groupArrayMovingAvg', 'Map'), 'meanZTestMap': ('meanZTest', 'Map'), 'sumKahanMap': ('sumKahan', 'Map'), 'kurtSampMap': ('kurtSamp', 'Map'), 'uniqMap': ('uniq', 'Map'), 'groupBitmapMap': ('groupBitmap', 'Map'), 'sumMapMap': ('sumMap', 'Map'), 'covarPopMap': ('covarPop', 'Map'), 'quantilesMap': ('quantiles', 'Map'), 'quantileTDigestWeightedMap': ('quantileTDigestWeighted', 'Map'), 'argMaxMap': ('argMax', 'Map'), 'studentTTestMap': ('studentTTest', 'Map'), 'kurtPopMap': ('kurtPop', 'Map'), 'stddevSampMap': ('stddevSamp', 'Map'), 'skewSampMap': ('skewSamp', 'Map'), 'last_valueMap': ('last_value', 'Map'), 'quantileBFloat16WeightedMap': ('quantileBFloat16Weighted', 'Map'), 'sumCountMap': ('sumCount', 'Map'), 'quantilesTimingMap': ('quantilesTiming', 'Map'), 'sumMap': ('sumMap', ''), 'medianMap': ('median', 'Map'), 'anyMap': ('any', 'Map'), 'quantileDeterministicMap': ('quantileDeterministic', 'Map'), 'groupArrayInsertAtMap': ('groupArrayInsertAt', 'Map'), 'maxIntersectionsMap': ('maxIntersections', 'Map'), 'quantileExactMap': ('quantileExact', 'Map'), 'quantileTimingMap': ('quantileTiming', 'Map'), 'countMap': ('count', 'Map'), 'quantileTimingWeightedMap': ('quantileTimingWeighted', 'Map'), 'anyHeavyMap': ('anyHeavy', 'Map'), 'varPopMap': ('varPop', 'Map'), 'quantileInterpolatedWeightedMap': ('quantileInterpolatedWeighted', 'Map'), 'maxMap': ('maxMap', ''), 'avgWeightedMap': ('avgWeighted', 'Map'), 'groupBitAndMap': ('groupBitAnd', 'Map'), 'quantilesExactWeightedMap': ('quantilesExactWeighted', 'Map'), 'sumWithOverflowMap': ('sumWithOverflow', 'Map'), 'anyLastMap': ('anyLast', 'Map'), 'quantilesBFloat16WeightedMap': ('quantilesBFloat16Weighted', 'Map'), 'quantilesExactLowMap': ('quantilesExactLow', 'Map'), 'exponentialMovingAverageMap': ('exponentialMovingAverage', 'Map'), 'uniqCombinedSimpleState': ('uniqCombined', 'SimpleState'), 'contingencySimpleState': ('contingency', 'SimpleState'), 'stddevPopSimpleState': ('stddevPop', 'SimpleState'), 'uniqThetaSimpleState': ('uniqTheta', 'SimpleState'), 'quantileExactWeightedSimpleState': ('quantileExactWeighted', 'SimpleState'), 'simpleLinearRegressionSimpleState': ('simpleLinearRegression', 'SimpleState'), 'quantilesExactSimpleState': ('quantilesExact', 'SimpleState'), 'theilsUSimpleState': ('theilsU', 'SimpleState'), 'quantileSimpleState': ('quantile', 'SimpleState'), 'quantilesDeterministicSimpleState': ('quantilesDeterministic', 'SimpleState'), 'groupBitmapXorSimpleState': ('groupBitmapXor', 'SimpleState'), 'quantileExactLowSimpleState': ('quantileExactLow', 'SimpleState'), 'maxMapSimpleState': ('maxMap', 'SimpleState'), 'deltaSumSimpleState': ('deltaSum', 'SimpleState'), 'groupBitmapOrSimpleState': ('groupBitmapOr', 'SimpleState'), 'stochasticLogisticRegressionSimpleState': ('stochasticLogisticRegression', 'SimpleState'), 'intervalLengthSumSimpleState': ('intervalLengthSum', 'SimpleState'), 'groupArraySampleSimpleState': ('groupArraySample', 'SimpleState'), 
'covarSampSimpleState': ('covarSamp', 'SimpleState'), 'groupBitXorSimpleState': ('groupBitXor', 'SimpleState'), 'quantilesInterpolatedWeightedSimpleState': ('quantilesInterpolatedWeighted', 'SimpleState'), 'first_valueSimpleState': ('first_value', 'SimpleState'), 'quantilesGKSimpleState': ('quantilesGK', 'SimpleState'), 'deltaSumTimestampSimpleState': ('deltaSumTimestamp', 'SimpleState'), 'maxIntersectionsPositionSimpleState': ('maxIntersectionsPosition', 'SimpleState'), 'groupArrayMovingSumSimpleState': ('groupArrayMovingSum', 'SimpleState'), 'varSampSimpleState': ('varSamp', 'SimpleState'), 'groupBitOrSimpleState': ('groupBitOr', 'SimpleState'), 'argMinSimpleState': ('argMin', 'SimpleState'), 'groupBitmapAndSimpleState': ('groupBitmapAnd', 'SimpleState'), 'rankCorrSimpleState': ('rankCorr', 'SimpleState'), 'welchTTestSimpleState': ('welchTTest', 'SimpleState'), 'skewPopSimpleState': ('skewPop', 'SimpleState'), 'avgSimpleState': ('avg', 'SimpleState'), 'uniqHLL12SimpleState': ('uniqHLL12', 'SimpleState'), 'quantileGKSimpleState': ('quantileGK', 'SimpleState'), 'mannWhitneyUTestSimpleState': ('mannWhitneyUTest', 'SimpleState'), 'largestTriangleThreeBucketsSimpleState': ('largestTriangleThreeBuckets', 'SimpleState'), 'quantileBFloat16SimpleState': ('quantileBFloat16', 'SimpleState'), 'quantileTDigestSimpleState': ('quantileTDigest', 'SimpleState'), 'minMapSimpleState': ('minMap', 'SimpleState'), 'quantilesTDigestWeightedSimpleState': ('quantilesTDigestWeighted', 'SimpleState'), 'uniqExactSimpleState': ('uniqExact', 'SimpleState'), 'quantilesTimingWeightedSimpleState': ('quantilesTimingWeighted', 'SimpleState'), 'topKSimpleState': ('topK', 'SimpleState'), 'quantilesExactHighSimpleState': ('quantilesExactHigh', 'SimpleState'), 'topKWeightedSimpleState': ('topKWeighted', 'SimpleState'), 'categoricalInformationValueSimpleState': ('categoricalInformationValue', 'SimpleState'), 'entropySimpleState': ('entropy', 'SimpleState'), 'uniqCombined64SimpleState': ('uniqCombined64', 'SimpleState'), 'quantileExactHighSimpleState': ('quantileExactHigh', 'SimpleState'), 'kolmogorovSmirnovTestSimpleState': ('kolmogorovSmirnovTest', 'SimpleState'), 'minSimpleState': ('min', 'SimpleState'), 'groupArraySimpleState': ('groupArray', 'SimpleState'), 'quantilesBFloat16SimpleState': ('quantilesBFloat16', 'SimpleState'), 'boundingRatioSimpleState': ('boundingRatio', 'SimpleState'), 'corrSimpleState': ('corr', 'SimpleState'), 'cramersVSimpleState': ('cramersV', 'SimpleState'), 'cramersVBiasCorrectedSimpleState': ('cramersVBiasCorrected', 'SimpleState'), 'quantilesTDigestSimpleState': ('quantilesTDigest', 'SimpleState'), 'stochasticLinearRegressionSimpleState': ('stochasticLinearRegression', 'SimpleState'), 'groupUniqArraySimpleState': ('groupUniqArray', 'SimpleState'), 'sparkBarSimpleState': ('sparkBar', 'SimpleState'), 'groupArrayLastSimpleState': ('groupArrayLast', 'SimpleState'), 'groupArrayMovingAvgSimpleState': ('groupArrayMovingAvg', 'SimpleState'), 'meanZTestSimpleState': ('meanZTest', 'SimpleState'), 'sumKahanSimpleState': ('sumKahan', 'SimpleState'), 'kurtSampSimpleState': ('kurtSamp', 'SimpleState'), 'uniqSimpleState': ('uniq', 'SimpleState'), 'groupBitmapSimpleState': ('groupBitmap', 'SimpleState'), 'sumMapSimpleState': ('sumMap', 'SimpleState'), 'covarPopSimpleState': ('covarPop', 'SimpleState'), 'quantilesSimpleState': ('quantiles', 'SimpleState'), 'quantileTDigestWeightedSimpleState': ('quantileTDigestWeighted', 'SimpleState'), 'argMaxSimpleState': ('argMax', 'SimpleState'), 'studentTTestSimpleState': 
('studentTTest', 'SimpleState'), 'kurtPopSimpleState': ('kurtPop', 'SimpleState'), 'stddevSampSimpleState': ('stddevSamp', 'SimpleState'), 'skewSampSimpleState': ('skewSamp', 'SimpleState'), 'last_valueSimpleState': ('last_value', 'SimpleState'), 'quantileBFloat16WeightedSimpleState': ('quantileBFloat16Weighted', 'SimpleState'), 'sumCountSimpleState': ('sumCount', 'SimpleState'), 'quantilesTimingSimpleState': ('quantilesTiming', 'SimpleState'), 'sumSimpleState': ('sum', 'SimpleState'), 'medianSimpleState': ('median', 'SimpleState'), 'anySimpleState': ('any', 'SimpleState'), 'quantileDeterministicSimpleState': ('quantileDeterministic', 'SimpleState'), 'groupArrayInsertAtSimpleState': ('groupArrayInsertAt', 'SimpleState'), 'maxIntersectionsSimpleState': ('maxIntersections', 'SimpleState'), 'quantileExactSimpleState': ('quantileExact', 'SimpleState'), 'quantileTimingSimpleState': ('quantileTiming', 'SimpleState'), 'countSimpleState': ('count', 'SimpleState'), 'quantileTimingWeightedSimpleState': ('quantileTimingWeighted', 'SimpleState'), 'anyHeavySimpleState': ('anyHeavy', 'SimpleState'), 'varPopSimpleState': ('varPop', 'SimpleState'), 'quantileInterpolatedWeightedSimpleState': ('quantileInterpolatedWeighted', 'SimpleState'), 'maxSimpleState': ('max', 'SimpleState'), 'avgWeightedSimpleState': ('avgWeighted', 'SimpleState'), 'groupBitAndSimpleState': ('groupBitAnd', 'SimpleState'), 'quantilesExactWeightedSimpleState': ('quantilesExactWeighted', 'SimpleState'), 'sumWithOverflowSimpleState': ('sumWithOverflow', 'SimpleState'), 'anyLastSimpleState': ('anyLast', 'SimpleState'), 'quantilesBFloat16WeightedSimpleState': ('quantilesBFloat16Weighted', 'SimpleState'), 'quantilesExactLowSimpleState': ('quantilesExactLow', 'SimpleState'), 'exponentialMovingAverageSimpleState': ('exponentialMovingAverage', 'SimpleState'), 'uniqCombinedState': ('uniqCombined', 'State'), 'contingencyState': ('contingency', 'State'), 'stddevPopState': ('stddevPop', 'State'), 'uniqThetaState': ('uniqTheta', 'State'), 'quantileExactWeightedState': ('quantileExactWeighted', 'State'), 'simpleLinearRegressionState': ('simpleLinearRegression', 'State'), 'quantilesExactState': ('quantilesExact', 'State'), 'theilsUState': ('theilsU', 'State'), 'quantileState': ('quantile', 'State'), 'quantilesDeterministicState': ('quantilesDeterministic', 'State'), 'groupBitmapXorState': ('groupBitmapXor', 'State'), 'quantileExactLowState': ('quantileExactLow', 'State'), 'maxMapState': ('maxMap', 'State'), 'deltaSumState': ('deltaSum', 'State'), 'groupBitmapOrState': ('groupBitmapOr', 'State'), 'stochasticLogisticRegressionState': ('stochasticLogisticRegression', 'State'), 'intervalLengthSumState': ('intervalLengthSum', 'State'), 'groupArraySampleState': ('groupArraySample', 'State'), 'covarSampState': ('covarSamp', 'State'), 'groupBitXorState': ('groupBitXor', 'State'), 'quantilesInterpolatedWeightedState': ('quantilesInterpolatedWeighted', 'State'), 'first_valueState': ('first_value', 'State'), 'quantilesGKState': ('quantilesGK', 'State'), 'deltaSumTimestampState': ('deltaSumTimestamp', 'State'), 'maxIntersectionsPositionState': ('maxIntersectionsPosition', 'State'), 'groupArrayMovingSumState': ('groupArrayMovingSum', 'State'), 'varSampState': ('varSamp', 'State'), 'groupBitOrState': ('groupBitOr', 'State'), 'argMinState': ('argMin', 'State'), 'groupBitmapAndState': ('groupBitmapAnd', 'State'), 'rankCorrState': ('rankCorr', 'State'), 'welchTTestState': ('welchTTest', 'State'), 'skewPopState': ('skewPop', 'State'), 'avgState': ('avg', 'State'), 
'uniqHLL12State': ('uniqHLL12', 'State'), 'quantileGKState': ('quantileGK', 'State'), 'mannWhitneyUTestState': ('mannWhitneyUTest', 'State'), 'largestTriangleThreeBucketsState': ('largestTriangleThreeBuckets', 'State'), 'quantileBFloat16State': ('quantileBFloat16', 'State'), 'quantileTDigestState': ('quantileTDigest', 'State'), 'minMapState': ('minMap', 'State'), 'quantilesTDigestWeightedState': ('quantilesTDigestWeighted', 'State'), 'uniqExactState': ('uniqExact', 'State'), 'quantilesTimingWeightedState': ('quantilesTimingWeighted', 'State'), 'topKState': ('topK', 'State'), 'quantilesExactHighState': ('quantilesExactHigh', 'State'), 'topKWeightedState': ('topKWeighted', 'State'), 'categoricalInformationValueState': ('categoricalInformationValue', 'State'), 'entropyState': ('entropy', 'State'), 'uniqCombined64State': ('uniqCombined64', 'State'), 'quantileExactHighState': ('quantileExactHigh', 'State'), 'kolmogorovSmirnovTestState': ('kolmogorovSmirnovTest', 'State'), 'minState': ('min', 'State'), 'groupArrayState': ('groupArray', 'State'), 'quantilesBFloat16State': ('quantilesBFloat16', 'State'), 'boundingRatioState': ('boundingRatio', 'State'), 'corrState': ('corr', 'State'), 'cramersVState': ('cramersV', 'State'), 'cramersVBiasCorrectedState': ('cramersVBiasCorrected', 'State'), 'quantilesTDigestState': ('quantilesTDigest', 'State'), 'stochasticLinearRegressionState': ('stochasticLinearRegression', 'State'), 'groupUniqArrayState': ('groupUniqArray', 'State'), 'sparkBarState': ('sparkBar', 'State'), 'groupArrayLastState': ('groupArrayLast', 'State'), 'groupArrayMovingAvgState': ('groupArrayMovingAvg', 'State'), 'meanZTestState': ('meanZTest', 'State'), 'sumKahanState': ('sumKahan', 'State'), 'kurtSampState': ('kurtSamp', 'State'), 'uniqState': ('uniq', 'State'), 'groupBitmapState': ('groupBitmap', 'State'), 'sumMapState': ('sumMap', 'State'), 'covarPopState': ('covarPop', 'State'), 'quantilesState': ('quantiles', 'State'), 'quantileTDigestWeightedState': ('quantileTDigestWeighted', 'State'), 'argMaxState': ('argMax', 'State'), 'studentTTestState': ('studentTTest', 'State'), 'kurtPopState': ('kurtPop', 'State'), 'stddevSampState': ('stddevSamp', 'State'), 'skewSampState': ('skewSamp', 'State'), 'last_valueState': ('last_value', 'State'), 'quantileBFloat16WeightedState': ('quantileBFloat16Weighted', 'State'), 'sumCountState': ('sumCount', 'State'), 'quantilesTimingState': ('quantilesTiming', 'State'), 'sumState': ('sum', 'State'), 'medianState': ('median', 'State'), 'anyState': ('any', 'State'), 'quantileDeterministicState': ('quantileDeterministic', 'State'), 'groupArrayInsertAtState': ('groupArrayInsertAt', 'State'), 'maxIntersectionsState': ('maxIntersections', 'State'), 'quantileExactState': ('quantileExact', 'State'), 'quantileTimingState': ('quantileTiming', 'State'), 'countState': ('count', 'State'), 'quantileTimingWeightedState': ('quantileTimingWeighted', 'State'), 'anyHeavyState': ('anyHeavy', 'State'), 'varPopState': ('varPop', 'State'), 'quantileInterpolatedWeightedState': ('quantileInterpolatedWeighted', 'State'), 'maxState': ('max', 'State'), 'avgWeightedState': ('avgWeighted', 'State'), 'groupBitAndState': ('groupBitAnd', 'State'), 'quantilesExactWeightedState': ('quantilesExactWeighted', 'State'), 'sumWithOverflowState': ('sumWithOverflow', 'State'), 'anyLastState': ('anyLast', 'State'), 'quantilesBFloat16WeightedState': ('quantilesBFloat16Weighted', 'State'), 'quantilesExactLowState': ('quantilesExactLow', 'State'), 'exponentialMovingAverageState': 
('exponentialMovingAverage', 'State'), 'uniqCombinedMerge': ('uniqCombined', 'Merge'), 'contingencyMerge': ('contingency', 'Merge'), 'stddevPopMerge': ('stddevPop', 'Merge'), 'uniqThetaMerge': ('uniqTheta', 'Merge'), 'quantileExactWeightedMerge': ('quantileExactWeighted', 'Merge'), 'simpleLinearRegressionMerge': ('simpleLinearRegression', 'Merge'), 'quantilesExactMerge': ('quantilesExact', 'Merge'), 'theilsUMerge': ('theilsU', 'Merge'), 'quantileMerge': ('quantile', 'Merge'), 'quantilesDeterministicMerge': ('quantilesDeterministic', 'Merge'), 'groupBitmapXorMerge': ('groupBitmapXor', 'Merge'), 'quantileExactLowMerge': ('quantileExactLow', 'Merge'), 'maxMapMerge': ('maxMap', 'Merge'), 'deltaSumMerge': ('deltaSum', 'Merge'), 'groupBitmapOrMerge': ('groupBitmapOr', 'Merge'), 'stochasticLogisticRegressionMerge': ('stochasticLogisticRegression', 'Merge'), 'intervalLengthSumMerge': ('intervalLengthSum', 'Merge'), 'groupArraySampleMerge': ('groupArraySample', 'Merge'), 'covarSampMerge': ('covarSamp', 'Merge'), 'groupBitXorMerge': ('groupBitXor', 'Merge'), 'quantilesInterpolatedWeightedMerge': ('quantilesInterpolatedWeighted', 'Merge'), 'first_valueMerge': ('first_value', 'Merge'), 'quantilesGKMerge': ('quantilesGK', 'Merge'), 'deltaSumTimestampMerge': ('deltaSumTimestamp', 'Merge'), 'maxIntersectionsPositionMerge': ('maxIntersectionsPosition', 'Merge'), 'groupArrayMovingSumMerge': ('groupArrayMovingSum', 'Merge'), 'varSampMerge': ('varSamp', 'Merge'), 'groupBitOrMerge': ('groupBitOr', 'Merge'), 'argMinMerge': ('argMin', 'Merge'), 'groupBitmapAndMerge': ('groupBitmapAnd', 'Merge'), 'rankCorrMerge': ('rankCorr', 'Merge'), 'welchTTestMerge': ('welchTTest', 'Merge'), 'skewPopMerge': ('skewPop', 'Merge'), 'avgMerge': ('avg', 'Merge'), 'uniqHLL12Merge': ('uniqHLL12', 'Merge'), 'quantileGKMerge': ('quantileGK', 'Merge'), 'mannWhitneyUTestMerge': ('mannWhitneyUTest', 'Merge'), 'largestTriangleThreeBucketsMerge': ('largestTriangleThreeBuckets', 'Merge'), 'quantileBFloat16Merge': ('quantileBFloat16', 'Merge'), 'quantileTDigestMerge': ('quantileTDigest', 'Merge'), 'minMapMerge': ('minMap', 'Merge'), 'quantilesTDigestWeightedMerge': ('quantilesTDigestWeighted', 'Merge'), 'uniqExactMerge': ('uniqExact', 'Merge'), 'quantilesTimingWeightedMerge': ('quantilesTimingWeighted', 'Merge'), 'topKMerge': ('topK', 'Merge'), 'quantilesExactHighMerge': ('quantilesExactHigh', 'Merge'), 'topKWeightedMerge': ('topKWeighted', 'Merge'), 'categoricalInformationValueMerge': ('categoricalInformationValue', 'Merge'), 'entropyMerge': ('entropy', 'Merge'), 'uniqCombined64Merge': ('uniqCombined64', 'Merge'), 'quantileExactHighMerge': ('quantileExactHigh', 'Merge'), 'kolmogorovSmirnovTestMerge': ('kolmogorovSmirnovTest', 'Merge'), 'minMerge': ('min', 'Merge'), 'groupArrayMerge': ('groupArray', 'Merge'), 'quantilesBFloat16Merge': ('quantilesBFloat16', 'Merge'), 'boundingRatioMerge': ('boundingRatio', 'Merge'), 'corrMerge': ('corr', 'Merge'), 'cramersVMerge': ('cramersV', 'Merge'), 'cramersVBiasCorrectedMerge': ('cramersVBiasCorrected', 'Merge'), 'quantilesTDigestMerge': ('quantilesTDigest', 'Merge'), 'stochasticLinearRegressionMerge': ('stochasticLinearRegression', 'Merge'), 'groupUniqArrayMerge': ('groupUniqArray', 'Merge'), 'sparkBarMerge': ('sparkBar', 'Merge'), 'groupArrayLastMerge': ('groupArrayLast', 'Merge'), 'groupArrayMovingAvgMerge': ('groupArrayMovingAvg', 'Merge'), 'meanZTestMerge': ('meanZTest', 'Merge'), 'sumKahanMerge': ('sumKahan', 'Merge'), 'kurtSampMerge': ('kurtSamp', 'Merge'), 'uniqMerge': ('uniq', 'Merge'), 
'groupBitmapMerge': ('groupBitmap', 'Merge'), 'sumMapMerge': ('sumMap', 'Merge'), 'covarPopMerge': ('covarPop', 'Merge'), 'quantilesMerge': ('quantiles', 'Merge'), 'quantileTDigestWeightedMerge': ('quantileTDigestWeighted', 'Merge'), 'argMaxMerge': ('argMax', 'Merge'), 'studentTTestMerge': ('studentTTest', 'Merge'), 'kurtPopMerge': ('kurtPop', 'Merge'), 'stddevSampMerge': ('stddevSamp', 'Merge'), 'skewSampMerge': ('skewSamp', 'Merge'), 'last_valueMerge': ('last_value', 'Merge'), 'quantileBFloat16WeightedMerge': ('quantileBFloat16Weighted', 'Merge'), 'sumCountMerge': ('sumCount', 'Merge'), 'quantilesTimingMerge': ('quantilesTiming', 'Merge'), 'sumMerge': ('sum', 'Merge'), 'medianMerge': ('median', 'Merge'), 'anyMerge': ('any', 'Merge'), 'quantileDeterministicMerge': ('quantileDeterministic', 'Merge'), 'groupArrayInsertAtMerge': ('groupArrayInsertAt', 'Merge'), 'maxIntersectionsMerge': ('maxIntersections', 'Merge'), 'quantileExactMerge': ('quantileExact', 'Merge'), 'quantileTimingMerge': ('quantileTiming', 'Merge'), 'countMerge': ('count', 'Merge'), 'quantileTimingWeightedMerge': ('quantileTimingWeighted', 'Merge'), 'anyHeavyMerge': ('anyHeavy', 'Merge'), 'varPopMerge': ('varPop', 'Merge'), 'quantileInterpolatedWeightedMerge': ('quantileInterpolatedWeighted', 'Merge'), 'maxMerge': ('max', 'Merge'), 'avgWeightedMerge': ('avgWeighted', 'Merge'), 'groupBitAndMerge': ('groupBitAnd', 'Merge'), 'quantilesExactWeightedMerge': ('quantilesExactWeighted', 'Merge'), 'sumWithOverflowMerge': ('sumWithOverflow', 'Merge'), 'anyLastMerge': ('anyLast', 'Merge'), 'quantilesBFloat16WeightedMerge': ('quantilesBFloat16Weighted', 'Merge'), 'quantilesExactLowMerge': ('quantilesExactLow', 'Merge'), 'exponentialMovingAverageMerge': ('exponentialMovingAverage', 'Merge'), 'uniqCombinedMergeState': ('uniqCombined', 'MergeState'), 'contingencyMergeState': ('contingency', 'MergeState'), 'stddevPopMergeState': ('stddevPop', 'MergeState'), 'uniqThetaMergeState': ('uniqTheta', 'MergeState'), 'quantileExactWeightedMergeState': ('quantileExactWeighted', 'MergeState'), 'simpleLinearRegressionMergeState': ('simpleLinearRegression', 'MergeState'), 'quantilesExactMergeState': ('quantilesExact', 'MergeState'), 'theilsUMergeState': ('theilsU', 'MergeState'), 'quantileMergeState': ('quantile', 'MergeState'), 'quantilesDeterministicMergeState': ('quantilesDeterministic', 'MergeState'), 'groupBitmapXorMergeState': ('groupBitmapXor', 'MergeState'), 'quantileExactLowMergeState': ('quantileExactLow', 'MergeState'), 'maxMapMergeState': ('maxMap', 'MergeState'), 'deltaSumMergeState': ('deltaSum', 'MergeState'), 'groupBitmapOrMergeState': ('groupBitmapOr', 'MergeState'), 'stochasticLogisticRegressionMergeState': ('stochasticLogisticRegression', 'MergeState'), 'intervalLengthSumMergeState': ('intervalLengthSum', 'MergeState'), 'groupArraySampleMergeState': ('groupArraySample', 'MergeState'), 'covarSampMergeState': ('covarSamp', 'MergeState'), 'groupBitXorMergeState': ('groupBitXor', 'MergeState'), 'quantilesInterpolatedWeightedMergeState': ('quantilesInterpolatedWeighted', 'MergeState'), 'first_valueMergeState': ('first_value', 'MergeState'), 'quantilesGKMergeState': ('quantilesGK', 'MergeState'), 'deltaSumTimestampMergeState': ('deltaSumTimestamp', 'MergeState'), 'maxIntersectionsPositionMergeState': ('maxIntersectionsPosition', 'MergeState'), 'groupArrayMovingSumMergeState': ('groupArrayMovingSum', 'MergeState'), 'varSampMergeState': ('varSamp', 'MergeState'), 'groupBitOrMergeState': ('groupBitOr', 'MergeState'), 'argMinMergeState': 
('argMin', 'MergeState'), 'groupBitmapAndMergeState': ('groupBitmapAnd', 'MergeState'), 'rankCorrMergeState': ('rankCorr', 'MergeState'), 'welchTTestMergeState': ('welchTTest', 'MergeState'), 'skewPopMergeState': ('skewPop', 'MergeState'), 'avgMergeState': ('avg', 'MergeState'), 'uniqHLL12MergeState': ('uniqHLL12', 'MergeState'), 'quantileGKMergeState': ('quantileGK', 'MergeState'), 'mannWhitneyUTestMergeState': ('mannWhitneyUTest', 'MergeState'), 'largestTriangleThreeBucketsMergeState': ('largestTriangleThreeBuckets', 'MergeState'), 'quantileBFloat16MergeState': ('quantileBFloat16', 'MergeState'), 'quantileTDigestMergeState': ('quantileTDigest', 'MergeState'), 'minMapMergeState': ('minMap', 'MergeState'), 'quantilesTDigestWeightedMergeState': ('quantilesTDigestWeighted', 'MergeState'), 'uniqExactMergeState': ('uniqExact', 'MergeState'), 'quantilesTimingWeightedMergeState': ('quantilesTimingWeighted', 'MergeState'), 'topKMergeState': ('topK', 'MergeState'), 'quantilesExactHighMergeState': ('quantilesExactHigh', 'MergeState'), 'topKWeightedMergeState': ('topKWeighted', 'MergeState'), 'categoricalInformationValueMergeState': ('categoricalInformationValue', 'MergeState'), 'entropyMergeState': ('entropy', 'MergeState'), 'uniqCombined64MergeState': ('uniqCombined64', 'MergeState'), 'quantileExactHighMergeState': ('quantileExactHigh', 'MergeState'), 'kolmogorovSmirnovTestMergeState': ('kolmogorovSmirnovTest', 'MergeState'), 'minMergeState': ('min', 'MergeState'), 'groupArrayMergeState': ('groupArray', 'MergeState'), 'quantilesBFloat16MergeState': ('quantilesBFloat16', 'MergeState'), 'boundingRatioMergeState': ('boundingRatio', 'MergeState'), 'corrMergeState': ('corr', 'MergeState'), 'cramersVMergeState': ('cramersV', 'MergeState'), 'cramersVBiasCorrectedMergeState': ('cramersVBiasCorrected', 'MergeState'), 'quantilesTDigestMergeState': ('quantilesTDigest', 'MergeState'), 'stochasticLinearRegressionMergeState': ('stochasticLinearRegression', 'MergeState'), 'groupUniqArrayMergeState': ('groupUniqArray', 'MergeState'), 'sparkBarMergeState': ('sparkBar', 'MergeState'), 'groupArrayLastMergeState': ('groupArrayLast', 'MergeState'), 'groupArrayMovingAvgMergeState': ('groupArrayMovingAvg', 'MergeState'), 'meanZTestMergeState': ('meanZTest', 'MergeState'), 'sumKahanMergeState': ('sumKahan', 'MergeState'), 'kurtSampMergeState': ('kurtSamp', 'MergeState'), 'uniqMergeState': ('uniq', 'MergeState'), 'groupBitmapMergeState': ('groupBitmap', 'MergeState'), 'sumMapMergeState': ('sumMap', 'MergeState'), 'covarPopMergeState': ('covarPop', 'MergeState'), 'quantilesMergeState': ('quantiles', 'MergeState'), 'quantileTDigestWeightedMergeState': ('quantileTDigestWeighted', 'MergeState'), 'argMaxMergeState': ('argMax', 'MergeState'), 'studentTTestMergeState': ('studentTTest', 'MergeState'), 'kurtPopMergeState': ('kurtPop', 'MergeState'), 'stddevSampMergeState': ('stddevSamp', 'MergeState'), 'skewSampMergeState': ('skewSamp', 'MergeState'), 'last_valueMergeState': ('last_value', 'MergeState'), 'quantileBFloat16WeightedMergeState': ('quantileBFloat16Weighted', 'MergeState'), 'sumCountMergeState': ('sumCount', 'MergeState'), 'quantilesTimingMergeState': ('quantilesTiming', 'MergeState'), 'sumMergeState': ('sum', 'MergeState'), 'medianMergeState': ('median', 'MergeState'), 'anyMergeState': ('any', 'MergeState'), 'quantileDeterministicMergeState': ('quantileDeterministic', 'MergeState'), 'groupArrayInsertAtMergeState': ('groupArrayInsertAt', 'MergeState'), 'maxIntersectionsMergeState': ('maxIntersections', 'MergeState'), 
'quantileExactMergeState': ('quantileExact', 'MergeState'), 'quantileTimingMergeState': ('quantileTiming', 'MergeState'), 'countMergeState': ('count', 'MergeState'), 'quantileTimingWeightedMergeState': ('quantileTimingWeighted', 'MergeState'), 'anyHeavyMergeState': ('anyHeavy', 'MergeState'), 'varPopMergeState': ('varPop', 'MergeState'), 'quantileInterpolatedWeightedMergeState': ('quantileInterpolatedWeighted', 'MergeState'), 'maxMergeState': ('max', 'MergeState'), 'avgWeightedMergeState': ('avgWeighted', 'MergeState'), 'groupBitAndMergeState': ('groupBitAnd', 'MergeState'), 'quantilesExactWeightedMergeState': ('quantilesExactWeighted', 'MergeState'), 'sumWithOverflowMergeState': ('sumWithOverflow', 'MergeState'), 'anyLastMergeState': ('anyLast', 'MergeState'), 'quantilesBFloat16WeightedMergeState': ('quantilesBFloat16Weighted', 'MergeState'), 'quantilesExactLowMergeState': ('quantilesExactLow', 'MergeState'), 'exponentialMovingAverageMergeState': ('exponentialMovingAverage', 'MergeState'), 'uniqCombinedForEach': ('uniqCombined', 'ForEach'), 'contingencyForEach': ('contingency', 'ForEach'), 'stddevPopForEach': ('stddevPop', 'ForEach'), 'uniqThetaForEach': ('uniqTheta', 'ForEach'), 'quantileExactWeightedForEach': ('quantileExactWeighted', 'ForEach'), 'simpleLinearRegressionForEach': ('simpleLinearRegression', 'ForEach'), 'quantilesExactForEach': ('quantilesExact', 'ForEach'), 'theilsUForEach': ('theilsU', 'ForEach'), 'quantileForEach': ('quantile', 'ForEach'), 'quantilesDeterministicForEach': ('quantilesDeterministic', 'ForEach'), 'groupBitmapXorForEach': ('groupBitmapXor', 'ForEach'), 'quantileExactLowForEach': ('quantileExactLow', 'ForEach'), 'maxMapForEach': ('maxMap', 'ForEach'), 'deltaSumForEach': ('deltaSum', 'ForEach'), 'groupBitmapOrForEach': ('groupBitmapOr', 'ForEach'), 'stochasticLogisticRegressionForEach': ('stochasticLogisticRegression', 'ForEach'), 'intervalLengthSumForEach': ('intervalLengthSum', 'ForEach'), 'groupArraySampleForEach': ('groupArraySample', 'ForEach'), 'covarSampForEach': ('covarSamp', 'ForEach'), 'groupBitXorForEach': ('groupBitXor', 'ForEach'), 'quantilesInterpolatedWeightedForEach': ('quantilesInterpolatedWeighted', 'ForEach'), 'first_valueForEach': ('first_value', 'ForEach'), 'quantilesGKForEach': ('quantilesGK', 'ForEach'), 'deltaSumTimestampForEach': ('deltaSumTimestamp', 'ForEach'), 'maxIntersectionsPositionForEach': ('maxIntersectionsPosition', 'ForEach'), 'groupArrayMovingSumForEach': ('groupArrayMovingSum', 'ForEach'), 'varSampForEach': ('varSamp', 'ForEach'), 'groupBitOrForEach': ('groupBitOr', 'ForEach'), 'argMinForEach': ('argMin', 'ForEach'), 'groupBitmapAndForEach': ('groupBitmapAnd', 'ForEach'), 'rankCorrForEach': ('rankCorr', 'ForEach'), 'welchTTestForEach': ('welchTTest', 'ForEach'), 'skewPopForEach': ('skewPop', 'ForEach'), 'avgForEach': ('avg', 'ForEach'), 'uniqHLL12ForEach': ('uniqHLL12', 'ForEach'), 'quantileGKForEach': ('quantileGK', 'ForEach'), 'mannWhitneyUTestForEach': ('mannWhitneyUTest', 'ForEach'), 'largestTriangleThreeBucketsForEach': ('largestTriangleThreeBuckets', 'ForEach'), 'quantileBFloat16ForEach': ('quantileBFloat16', 'ForEach'), 'quantileTDigestForEach': ('quantileTDigest', 'ForEach'), 'minMapForEach': ('minMap', 'ForEach'), 'quantilesTDigestWeightedForEach': ('quantilesTDigestWeighted', 'ForEach'), 'uniqExactForEach': ('uniqExact', 'ForEach'), 'quantilesTimingWeightedForEach': ('quantilesTimingWeighted', 'ForEach'), 'topKForEach': ('topK', 'ForEach'), 'quantilesExactHighForEach': ('quantilesExactHigh', 'ForEach'), 
'topKWeightedForEach': ('topKWeighted', 'ForEach'), 'categoricalInformationValueForEach': ('categoricalInformationValue', 'ForEach'), 'entropyForEach': ('entropy', 'ForEach'), 'uniqCombined64ForEach': ('uniqCombined64', 'ForEach'), 'quantileExactHighForEach': ('quantileExactHigh', 'ForEach'), 'kolmogorovSmirnovTestForEach': ('kolmogorovSmirnovTest', 'ForEach'), 'minForEach': ('min', 'ForEach'), 'groupArrayForEach': ('groupArray', 'ForEach'), 'quantilesBFloat16ForEach': ('quantilesBFloat16', 'ForEach'), 'boundingRatioForEach': ('boundingRatio', 'ForEach'), 'corrForEach': ('corr', 'ForEach'), 'cramersVForEach': ('cramersV', 'ForEach'), 'cramersVBiasCorrectedForEach': ('cramersVBiasCorrected', 'ForEach'), 'quantilesTDigestForEach': ('quantilesTDigest', 'ForEach'), 'stochasticLinearRegressionForEach': ('stochasticLinearRegression', 'ForEach'), 'groupUniqArrayForEach': ('groupUniqArray', 'ForEach'), 'sparkBarForEach': ('sparkBar', 'ForEach'), 'groupArrayLastForEach': ('groupArrayLast', 'ForEach'), 'groupArrayMovingAvgForEach': ('groupArrayMovingAvg', 'ForEach'), 'meanZTestForEach': ('meanZTest', 'ForEach'), 'sumKahanForEach': ('sumKahan', 'ForEach'), 'kurtSampForEach': ('kurtSamp', 'ForEach'), 'uniqForEach': ('uniq', 'ForEach'), 'groupBitmapForEach': ('groupBitmap', 'ForEach'), 'sumMapForEach': ('sumMap', 'ForEach'), 'covarPopForEach': ('covarPop', 'ForEach'), 'quantilesForEach': ('quantiles', 'ForEach'), 'quantileTDigestWeightedForEach': ('quantileTDigestWeighted', 'ForEach'), 'argMaxForEach': ('argMax', 'ForEach'), 'studentTTestForEach': ('studentTTest', 'ForEach'), 'kurtPopForEach': ('kurtPop', 'ForEach'), 'stddevSampForEach': ('stddevSamp', 'ForEach'), 'skewSampForEach': ('skewSamp', 'ForEach'), 'last_valueForEach': ('last_value', 'ForEach'), 'quantileBFloat16WeightedForEach': ('quantileBFloat16Weighted', 'ForEach'), 'sumCountForEach': ('sumCount', 'ForEach'), 'quantilesTimingForEach': ('quantilesTiming', 'ForEach'), 'sumForEach': ('sum', 'ForEach'), 'medianForEach': ('median', 'ForEach'), 'anyForEach': ('any', 'ForEach'), 'quantileDeterministicForEach': ('quantileDeterministic', 'ForEach'), 'groupArrayInsertAtForEach': ('groupArrayInsertAt', 'ForEach'), 'maxIntersectionsForEach': ('maxIntersections', 'ForEach'), 'quantileExactForEach': ('quantileExact', 'ForEach'), 'quantileTimingForEach': ('quantileTiming', 'ForEach'), 'countForEach': ('count', 'ForEach'), 'quantileTimingWeightedForEach': ('quantileTimingWeighted', 'ForEach'), 'anyHeavyForEach': ('anyHeavy', 'ForEach'), 'varPopForEach': ('varPop', 'ForEach'), 'quantileInterpolatedWeightedForEach': ('quantileInterpolatedWeighted', 'ForEach'), 'maxForEach': ('max', 'ForEach'), 'avgWeightedForEach': ('avgWeighted', 'ForEach'), 'groupBitAndForEach': ('groupBitAnd', 'ForEach'), 'quantilesExactWeightedForEach': ('quantilesExactWeighted', 'ForEach'), 'sumWithOverflowForEach': ('sumWithOverflow', 'ForEach'), 'anyLastForEach': ('anyLast', 'ForEach'), 'quantilesBFloat16WeightedForEach': ('quantilesBFloat16Weighted', 'ForEach'), 'quantilesExactLowForEach': ('quantilesExactLow', 'ForEach'), 'exponentialMovingAverageForEach': ('exponentialMovingAverage', 'ForEach'), 'uniqCombinedDistinct': ('uniqCombined', 'Distinct'), 'contingencyDistinct': ('contingency', 'Distinct'), 'stddevPopDistinct': ('stddevPop', 'Distinct'), 'uniqThetaDistinct': ('uniqTheta', 'Distinct'), 'quantileExactWeightedDistinct': ('quantileExactWeighted', 'Distinct'), 'simpleLinearRegressionDistinct': ('simpleLinearRegression', 'Distinct'), 'quantilesExactDistinct': ('quantilesExact', 
'Distinct'), 'theilsUDistinct': ('theilsU', 'Distinct'), 'quantileDistinct': ('quantile', 'Distinct'), 'quantilesDeterministicDistinct': ('quantilesDeterministic', 'Distinct'), 'groupBitmapXorDistinct': ('groupBitmapXor', 'Distinct'), 'quantileExactLowDistinct': ('quantileExactLow', 'Distinct'), 'maxMapDistinct': ('maxMap', 'Distinct'), 'deltaSumDistinct': ('deltaSum', 'Distinct'), 'groupBitmapOrDistinct': ('groupBitmapOr', 'Distinct'), 'stochasticLogisticRegressionDistinct': ('stochasticLogisticRegression', 'Distinct'), 'intervalLengthSumDistinct': ('intervalLengthSum', 'Distinct'), 'groupArraySampleDistinct': ('groupArraySample', 'Distinct'), 'covarSampDistinct': ('covarSamp', 'Distinct'), 'groupBitXorDistinct': ('groupBitXor', 'Distinct'), 'quantilesInterpolatedWeightedDistinct': ('quantilesInterpolatedWeighted', 'Distinct'), 'first_valueDistinct': ('first_value', 'Distinct'), 'quantilesGKDistinct': ('quantilesGK', 'Distinct'), 'deltaSumTimestampDistinct': ('deltaSumTimestamp', 'Distinct'), 'maxIntersectionsPositionDistinct': ('maxIntersectionsPosition', 'Distinct'), 'groupArrayMovingSumDistinct': ('groupArrayMovingSum', 'Distinct'), 'varSampDistinct': ('varSamp', 'Distinct'), 'groupBitOrDistinct': ('groupBitOr', 'Distinct'), 'argMinDistinct': ('argMin', 'Distinct'), 'groupBitmapAndDistinct': ('groupBitmapAnd', 'Distinct'), 'rankCorrDistinct': ('rankCorr', 'Distinct'), 'welchTTestDistinct': ('welchTTest', 'Distinct'), 'skewPopDistinct': ('skewPop', 'Distinct'), 'avgDistinct': ('avg', 'Distinct'), 'uniqHLL12Distinct': ('uniqHLL12', 'Distinct'), 'quantileGKDistinct': ('quantileGK', 'Distinct'), 'mannWhitneyUTestDistinct': ('mannWhitneyUTest', 'Distinct'), 'largestTriangleThreeBucketsDistinct': ('largestTriangleThreeBuckets', 'Distinct'), 'quantileBFloat16Distinct': ('quantileBFloat16', 'Distinct'), 'quantileTDigestDistinct': ('quantileTDigest', 'Distinct'), 'minMapDistinct': ('minMap', 'Distinct'), 'quantilesTDigestWeightedDistinct': ('quantilesTDigestWeighted', 'Distinct'), 'uniqExactDistinct': ('uniqExact', 'Distinct'), 'quantilesTimingWeightedDistinct': ('quantilesTimingWeighted', 'Distinct'), 'topKDistinct': ('topK', 'Distinct'), 'quantilesExactHighDistinct': ('quantilesExactHigh', 'Distinct'), 'topKWeightedDistinct': ('topKWeighted', 'Distinct'), 'categoricalInformationValueDistinct': ('categoricalInformationValue', 'Distinct'), 'entropyDistinct': ('entropy', 'Distinct'), 'uniqCombined64Distinct': ('uniqCombined64', 'Distinct'), 'quantileExactHighDistinct': ('quantileExactHigh', 'Distinct'), 'kolmogorovSmirnovTestDistinct': ('kolmogorovSmirnovTest', 'Distinct'), 'minDistinct': ('min', 'Distinct'), 'groupArrayDistinct': ('groupArray', 'Distinct'), 'quantilesBFloat16Distinct': ('quantilesBFloat16', 'Distinct'), 'boundingRatioDistinct': ('boundingRatio', 'Distinct'), 'corrDistinct': ('corr', 'Distinct'), 'cramersVDistinct': ('cramersV', 'Distinct'), 'cramersVBiasCorrectedDistinct': ('cramersVBiasCorrected', 'Distinct'), 'quantilesTDigestDistinct': ('quantilesTDigest', 'Distinct'), 'stochasticLinearRegressionDistinct': ('stochasticLinearRegression', 'Distinct'), 'groupUniqArrayDistinct': ('groupUniqArray', 'Distinct'), 'sparkBarDistinct': ('sparkBar', 'Distinct'), 'groupArrayLastDistinct': ('groupArrayLast', 'Distinct'), 'groupArrayMovingAvgDistinct': ('groupArrayMovingAvg', 'Distinct'), 'meanZTestDistinct': ('meanZTest', 'Distinct'), 'sumKahanDistinct': ('sumKahan', 'Distinct'), 'kurtSampDistinct': ('kurtSamp', 'Distinct'), 'uniqDistinct': ('uniq', 'Distinct'), 'groupBitmapDistinct': 
('groupBitmap', 'Distinct'), 'sumMapDistinct': ('sumMap', 'Distinct'), 'covarPopDistinct': ('covarPop', 'Distinct'), 'quantilesDistinct': ('quantiles', 'Distinct'), 'quantileTDigestWeightedDistinct': ('quantileTDigestWeighted', 'Distinct'), 'argMaxDistinct': ('argMax', 'Distinct'), 'studentTTestDistinct': ('studentTTest', 'Distinct'), 'kurtPopDistinct': ('kurtPop', 'Distinct'), 'stddevSampDistinct': ('stddevSamp', 'Distinct'), 'skewSampDistinct': ('skewSamp', 'Distinct'), 'last_valueDistinct': ('last_value', 'Distinct'), 'quantileBFloat16WeightedDistinct': ('quantileBFloat16Weighted', 'Distinct'), 'sumCountDistinct': ('sumCount', 'Distinct'), 'quantilesTimingDistinct': ('quantilesTiming', 'Distinct'), 'sumDistinct': ('sum', 'Distinct'), 'medianDistinct': ('median', 'Distinct'), 'anyDistinct': ('any', 'Distinct'), 'quantileDeterministicDistinct': ('quantileDeterministic', 'Distinct'), 'groupArrayInsertAtDistinct': ('groupArrayInsertAt', 'Distinct'), 'maxIntersectionsDistinct': ('maxIntersections', 'Distinct'), 'quantileExactDistinct': ('quantileExact', 'Distinct'), 'quantileTimingDistinct': ('quantileTiming', 'Distinct'), 'countDistinct': ('count', 'Distinct'), 'quantileTimingWeightedDistinct': ('quantileTimingWeighted', 'Distinct'), 'anyHeavyDistinct': ('anyHeavy', 'Distinct'), 'varPopDistinct': ('varPop', 'Distinct'), 'quantileInterpolatedWeightedDistinct': ('quantileInterpolatedWeighted', 'Distinct'), 'maxDistinct': ('max', 'Distinct'), 'avgWeightedDistinct': ('avgWeighted', 'Distinct'), 'groupBitAndDistinct': ('groupBitAnd', 'Distinct'), 'quantilesExactWeightedDistinct': ('quantilesExactWeighted', 'Distinct'), 'sumWithOverflowDistinct': ('sumWithOverflow', 'Distinct'), 'anyLastDistinct': ('anyLast', 'Distinct'), 'quantilesBFloat16WeightedDistinct': ('quantilesBFloat16Weighted', 'Distinct'), 'quantilesExactLowDistinct': ('quantilesExactLow', 'Distinct'), 'exponentialMovingAverageDistinct': ('exponentialMovingAverage', 'Distinct'), 'uniqCombinedOrDefault': ('uniqCombined', 'OrDefault'), 'contingencyOrDefault': ('contingency', 'OrDefault'), 'stddevPopOrDefault': ('stddevPop', 'OrDefault'), 'uniqThetaOrDefault': ('uniqTheta', 'OrDefault'), 'quantileExactWeightedOrDefault': ('quantileExactWeighted', 'OrDefault'), 'simpleLinearRegressionOrDefault': ('simpleLinearRegression', 'OrDefault'), 'quantilesExactOrDefault': ('quantilesExact', 'OrDefault'), 'theilsUOrDefault': ('theilsU', 'OrDefault'), 'quantileOrDefault': ('quantile', 'OrDefault'), 'quantilesDeterministicOrDefault': ('quantilesDeterministic', 'OrDefault'), 'groupBitmapXorOrDefault': ('groupBitmapXor', 'OrDefault'), 'quantileExactLowOrDefault': ('quantileExactLow', 'OrDefault'), 'maxMapOrDefault': ('maxMap', 'OrDefault'), 'deltaSumOrDefault': ('deltaSum', 'OrDefault'), 'groupBitmapOrOrDefault': ('groupBitmapOr', 'OrDefault'), 'stochasticLogisticRegressionOrDefault': ('stochasticLogisticRegression', 'OrDefault'), 'intervalLengthSumOrDefault': ('intervalLengthSum', 'OrDefault'), 'groupArraySampleOrDefault': ('groupArraySample', 'OrDefault'), 'covarSampOrDefault': ('covarSamp', 'OrDefault'), 'groupBitXorOrDefault': ('groupBitXor', 'OrDefault'), 'quantilesInterpolatedWeightedOrDefault': ('quantilesInterpolatedWeighted', 'OrDefault'), 'first_valueOrDefault': ('first_value', 'OrDefault'), 'quantilesGKOrDefault': ('quantilesGK', 'OrDefault'), 'deltaSumTimestampOrDefault': ('deltaSumTimestamp', 'OrDefault'), 'maxIntersectionsPositionOrDefault': ('maxIntersectionsPosition', 'OrDefault'), 'groupArrayMovingSumOrDefault': ('groupArrayMovingSum', 
'OrDefault'), 'varSampOrDefault': ('varSamp', 'OrDefault'), 'groupBitOrOrDefault': ('groupBitOr', 'OrDefault'), 'argMinOrDefault': ('argMin', 'OrDefault'), 'groupBitmapAndOrDefault': ('groupBitmapAnd', 'OrDefault'), 'rankCorrOrDefault': ('rankCorr', 'OrDefault'), 'welchTTestOrDefault': ('welchTTest', 'OrDefault'), 'skewPopOrDefault': ('skewPop', 'OrDefault'), 'avgOrDefault': ('avg', 'OrDefault'), 'uniqHLL12OrDefault': ('uniqHLL12', 'OrDefault'), 'quantileGKOrDefault': ('quantileGK', 'OrDefault'), 'mannWhitneyUTestOrDefault': ('mannWhitneyUTest', 'OrDefault'), 'largestTriangleThreeBucketsOrDefault': ('largestTriangleThreeBuckets', 'OrDefault'), 'quantileBFloat16OrDefault': ('quantileBFloat16', 'OrDefault'), 'quantileTDigestOrDefault': ('quantileTDigest', 'OrDefault'), 'minMapOrDefault': ('minMap', 'OrDefault'), 'quantilesTDigestWeightedOrDefault': ('quantilesTDigestWeighted', 'OrDefault'), 'uniqExactOrDefault': ('uniqExact', 'OrDefault'), 'quantilesTimingWeightedOrDefault': ('quantilesTimingWeighted', 'OrDefault'), 'topKOrDefault': ('topK', 'OrDefault'), 'quantilesExactHighOrDefault': ('quantilesExactHigh', 'OrDefault'), 'topKWeightedOrDefault': ('topKWeighted', 'OrDefault'), 'categoricalInformationValueOrDefault': ('categoricalInformationValue', 'OrDefault'), 'entropyOrDefault': ('entropy', 'OrDefault'), 'uniqCombined64OrDefault': ('uniqCombined64', 'OrDefault'), 'quantileExactHighOrDefault': ('quantileExactHigh', 'OrDefault'), 'kolmogorovSmirnovTestOrDefault': ('kolmogorovSmirnovTest', 'OrDefault'), 'minOrDefault': ('min', 'OrDefault'), 'groupArrayOrDefault': ('groupArray', 'OrDefault'), 'quantilesBFloat16OrDefault': ('quantilesBFloat16', 'OrDefault'), 'boundingRatioOrDefault': ('boundingRatio', 'OrDefault'), 'corrOrDefault': ('corr', 'OrDefault'), 'cramersVOrDefault': ('cramersV', 'OrDefault'), 'cramersVBiasCorrectedOrDefault': ('cramersVBiasCorrected', 'OrDefault'), 'quantilesTDigestOrDefault': ('quantilesTDigest', 'OrDefault'), 'stochasticLinearRegressionOrDefault': ('stochasticLinearRegression', 'OrDefault'), 'groupUniqArrayOrDefault': ('groupUniqArray', 'OrDefault'), 'sparkBarOrDefault': ('sparkBar', 'OrDefault'), 'groupArrayLastOrDefault': ('groupArrayLast', 'OrDefault'), 'groupArrayMovingAvgOrDefault': ('groupArrayMovingAvg', 'OrDefault'), 'meanZTestOrDefault': ('meanZTest', 'OrDefault'), 'sumKahanOrDefault': ('sumKahan', 'OrDefault'), 'kurtSampOrDefault': ('kurtSamp', 'OrDefault'), 'uniqOrDefault': ('uniq', 'OrDefault'), 'groupBitmapOrDefault': ('groupBitmap', 'OrDefault'), 'sumMapOrDefault': ('sumMap', 'OrDefault'), 'covarPopOrDefault': ('covarPop', 'OrDefault'), 'quantilesOrDefault': ('quantiles', 'OrDefault'), 'quantileTDigestWeightedOrDefault': ('quantileTDigestWeighted', 'OrDefault'), 'argMaxOrDefault': ('argMax', 'OrDefault'), 'studentTTestOrDefault': ('studentTTest', 'OrDefault'), 'kurtPopOrDefault': ('kurtPop', 'OrDefault'), 'stddevSampOrDefault': ('stddevSamp', 'OrDefault'), 'skewSampOrDefault': ('skewSamp', 'OrDefault'), 'last_valueOrDefault': ('last_value', 'OrDefault'), 'quantileBFloat16WeightedOrDefault': ('quantileBFloat16Weighted', 'OrDefault'), 'sumCountOrDefault': ('sumCount', 'OrDefault'), 'quantilesTimingOrDefault': ('quantilesTiming', 'OrDefault'), 'sumOrDefault': ('sum', 'OrDefault'), 'medianOrDefault': ('median', 'OrDefault'), 'anyOrDefault': ('any', 'OrDefault'), 'quantileDeterministicOrDefault': ('quantileDeterministic', 'OrDefault'), 'groupArrayInsertAtOrDefault': ('groupArrayInsertAt', 'OrDefault'), 'maxIntersectionsOrDefault': ('maxIntersections', 
'OrDefault'), 'quantileExactOrDefault': ('quantileExact', 'OrDefault'), 'quantileTimingOrDefault': ('quantileTiming', 'OrDefault'), 'countOrDefault': ('count', 'OrDefault'), 'quantileTimingWeightedOrDefault': ('quantileTimingWeighted', 'OrDefault'), 'anyHeavyOrDefault': ('anyHeavy', 'OrDefault'), 'varPopOrDefault': ('varPop', 'OrDefault'), 'quantileInterpolatedWeightedOrDefault': ('quantileInterpolatedWeighted', 'OrDefault'), 'maxOrDefault': ('max', 'OrDefault'), 'avgWeightedOrDefault': ('avgWeighted', 'OrDefault'), 'groupBitAndOrDefault': ('groupBitAnd', 'OrDefault'), 'quantilesExactWeightedOrDefault': ('quantilesExactWeighted', 'OrDefault'), 'sumWithOverflowOrDefault': ('sumWithOverflow', 'OrDefault'), 'anyLastOrDefault': ('anyLast', 'OrDefault'), 'quantilesBFloat16WeightedOrDefault': ('quantilesBFloat16Weighted', 'OrDefault'), 'quantilesExactLowOrDefault': ('quantilesExactLow', 'OrDefault'), 'exponentialMovingAverageOrDefault': ('exponentialMovingAverage', 'OrDefault'), 'uniqCombinedOrNull': ('uniqCombined', 'OrNull'), 'contingencyOrNull': ('contingency', 'OrNull'), 'stddevPopOrNull': ('stddevPop', 'OrNull'), 'uniqThetaOrNull': ('uniqTheta', 'OrNull'), 'quantileExactWeightedOrNull': ('quantileExactWeighted', 'OrNull'), 'simpleLinearRegressionOrNull': ('simpleLinearRegression', 'OrNull'), 'quantilesExactOrNull': ('quantilesExact', 'OrNull'), 'theilsUOrNull': ('theilsU', 'OrNull'), 'quantileOrNull': ('quantile', 'OrNull'), 'quantilesDeterministicOrNull': ('quantilesDeterministic', 'OrNull'), 'groupBitmapXorOrNull': ('groupBitmapXor', 'OrNull'), 'quantileExactLowOrNull': ('quantileExactLow', 'OrNull'), 'maxMapOrNull': ('maxMap', 'OrNull'), 'deltaSumOrNull': ('deltaSum', 'OrNull'), 'groupBitmapOrOrNull': ('groupBitmapOr', 'OrNull'), 'stochasticLogisticRegressionOrNull': ('stochasticLogisticRegression', 'OrNull'), 'intervalLengthSumOrNull': ('intervalLengthSum', 'OrNull'), 'groupArraySampleOrNull': ('groupArraySample', 'OrNull'), 'covarSampOrNull': ('covarSamp', 'OrNull'), 'groupBitXorOrNull': ('groupBitXor', 'OrNull'), 'quantilesInterpolatedWeightedOrNull': ('quantilesInterpolatedWeighted', 'OrNull'), 'first_valueOrNull': ('first_value', 'OrNull'), 'quantilesGKOrNull': ('quantilesGK', 'OrNull'), 'deltaSumTimestampOrNull': ('deltaSumTimestamp', 'OrNull'), 'maxIntersectionsPositionOrNull': ('maxIntersectionsPosition', 'OrNull'), 'groupArrayMovingSumOrNull': ('groupArrayMovingSum', 'OrNull'), 'varSampOrNull': ('varSamp', 'OrNull'), 'groupBitOrOrNull': ('groupBitOr', 'OrNull'), 'argMinOrNull': ('argMin', 'OrNull'), 'groupBitmapAndOrNull': ('groupBitmapAnd', 'OrNull'), 'rankCorrOrNull': ('rankCorr', 'OrNull'), 'welchTTestOrNull': ('welchTTest', 'OrNull'), 'skewPopOrNull': ('skewPop', 'OrNull'), 'avgOrNull': ('avg', 'OrNull'), 'uniqHLL12OrNull': ('uniqHLL12', 'OrNull'), 'quantileGKOrNull': ('quantileGK', 'OrNull'), 'mannWhitneyUTestOrNull': ('mannWhitneyUTest', 'OrNull'), 'largestTriangleThreeBucketsOrNull': ('largestTriangleThreeBuckets', 'OrNull'), 'quantileBFloat16OrNull': ('quantileBFloat16', 'OrNull'), 'quantileTDigestOrNull': ('quantileTDigest', 'OrNull'), 'minMapOrNull': ('minMap', 'OrNull'), 'quantilesTDigestWeightedOrNull': ('quantilesTDigestWeighted', 'OrNull'), 'uniqExactOrNull': ('uniqExact', 'OrNull'), 'quantilesTimingWeightedOrNull': ('quantilesTimingWeighted', 'OrNull'), 'topKOrNull': ('topK', 'OrNull'), 'quantilesExactHighOrNull': ('quantilesExactHigh', 'OrNull'), 'topKWeightedOrNull': ('topKWeighted', 'OrNull'), 'categoricalInformationValueOrNull': ('categoricalInformationValue', 
'OrNull'), 'entropyOrNull': ('entropy', 'OrNull'), 'uniqCombined64OrNull': ('uniqCombined64', 'OrNull'), 'quantileExactHighOrNull': ('quantileExactHigh', 'OrNull'), 'kolmogorovSmirnovTestOrNull': ('kolmogorovSmirnovTest', 'OrNull'), 'minOrNull': ('min', 'OrNull'), 'groupArrayOrNull': ('groupArray', 'OrNull'), 'quantilesBFloat16OrNull': ('quantilesBFloat16', 'OrNull'), 'boundingRatioOrNull': ('boundingRatio', 'OrNull'), 'corrOrNull': ('corr', 'OrNull'), 'cramersVOrNull': ('cramersV', 'OrNull'), 'cramersVBiasCorrectedOrNull': ('cramersVBiasCorrected', 'OrNull'), 'quantilesTDigestOrNull': ('quantilesTDigest', 'OrNull'), 'stochasticLinearRegressionOrNull': ('stochasticLinearRegression', 'OrNull'), 'groupUniqArrayOrNull': ('groupUniqArray', 'OrNull'), 'sparkBarOrNull': ('sparkBar', 'OrNull'), 'groupArrayLastOrNull': ('groupArrayLast', 'OrNull'), 'groupArrayMovingAvgOrNull': ('groupArrayMovingAvg', 'OrNull'), 'meanZTestOrNull': ('meanZTest', 'OrNull'), 'sumKahanOrNull': ('sumKahan', 'OrNull'), 'kurtSampOrNull': ('kurtSamp', 'OrNull'), 'uniqOrNull': ('uniq', 'OrNull'), 'groupBitmapOrNull': ('groupBitmap', 'OrNull'), 'sumMapOrNull': ('sumMap', 'OrNull'), 'covarPopOrNull': ('covarPop', 'OrNull'), 'quantilesOrNull': ('quantiles', 'OrNull'), 'quantileTDigestWeightedOrNull': ('quantileTDigestWeighted', 'OrNull'), 'argMaxOrNull': ('argMax', 'OrNull'), 'studentTTestOrNull': ('studentTTest', 'OrNull'), 'kurtPopOrNull': ('kurtPop', 'OrNull'), 'stddevSampOrNull': ('stddevSamp', 'OrNull'), 'skewSampOrNull': ('skewSamp', 'OrNull'), 'last_valueOrNull': ('last_value', 'OrNull'), 'quantileBFloat16WeightedOrNull': ('quantileBFloat16Weighted', 'OrNull'), 'sumCountOrNull': ('sumCount', 'OrNull'), 'quantilesTimingOrNull': ('quantilesTiming', 'OrNull'), 'sumOrNull': ('sum', 'OrNull'), 'medianOrNull': ('median', 'OrNull'), 'anyOrNull': ('any', 'OrNull'), 'quantileDeterministicOrNull': ('quantileDeterministic', 'OrNull'), 'groupArrayInsertAtOrNull': ('groupArrayInsertAt', 'OrNull'), 'maxIntersectionsOrNull': ('maxIntersections', 'OrNull'), 'quantileExactOrNull': ('quantileExact', 'OrNull'), 'quantileTimingOrNull': ('quantileTiming', 'OrNull'), 'countOrNull': ('count', 'OrNull'), 'quantileTimingWeightedOrNull': ('quantileTimingWeighted', 'OrNull'), 'anyHeavyOrNull': ('anyHeavy', 'OrNull'), 'varPopOrNull': ('varPop', 'OrNull'), 'quantileInterpolatedWeightedOrNull': ('quantileInterpolatedWeighted', 'OrNull'), 'maxOrNull': ('max', 'OrNull'), 'avgWeightedOrNull': ('avgWeighted', 'OrNull'), 'groupBitAndOrNull': ('groupBitAnd', 'OrNull'), 'quantilesExactWeightedOrNull': ('quantilesExactWeighted', 'OrNull'), 'sumWithOverflowOrNull': ('sumWithOverflow', 'OrNull'), 'anyLastOrNull': ('anyLast', 'OrNull'), 'quantilesBFloat16WeightedOrNull': ('quantilesBFloat16Weighted', 'OrNull'), 'quantilesExactLowOrNull': ('quantilesExactLow', 'OrNull'), 'exponentialMovingAverageOrNull': ('exponentialMovingAverage', 'OrNull'), 'uniqCombinedResample': ('uniqCombined', 'Resample'), 'contingencyResample': ('contingency', 'Resample'), 'stddevPopResample': ('stddevPop', 'Resample'), 'uniqThetaResample': ('uniqTheta', 'Resample'), 'quantileExactWeightedResample': ('quantileExactWeighted', 'Resample'), 'simpleLinearRegressionResample': ('simpleLinearRegression', 'Resample'), 'quantilesExactResample': ('quantilesExact', 'Resample'), 'theilsUResample': ('theilsU', 'Resample'), 'quantileResample': ('quantile', 'Resample'), 'quantilesDeterministicResample': ('quantilesDeterministic', 'Resample'), 'groupBitmapXorResample': ('groupBitmapXor', 'Resample'), 
'quantileExactLowResample': ('quantileExactLow', 'Resample'), 'maxMapResample': ('maxMap', 'Resample'), 'deltaSumResample': ('deltaSum', 'Resample'), 'groupBitmapOrResample': ('groupBitmapOr', 'Resample'), 'stochasticLogisticRegressionResample': ('stochasticLogisticRegression', 'Resample'), 'intervalLengthSumResample': ('intervalLengthSum', 'Resample'), 'groupArraySampleResample': ('groupArraySample', 'Resample'), 'covarSampResample': ('covarSamp', 'Resample'), 'groupBitXorResample': ('groupBitXor', 'Resample'), 'quantilesInterpolatedWeightedResample': ('quantilesInterpolatedWeighted', 'Resample'), 'first_valueResample': ('first_value', 'Resample'), 'quantilesGKResample': ('quantilesGK', 'Resample'), 'deltaSumTimestampResample': ('deltaSumTimestamp', 'Resample'), 'maxIntersectionsPositionResample': ('maxIntersectionsPosition', 'Resample'), 'groupArrayMovingSumResample': ('groupArrayMovingSum', 'Resample'), 'varSampResample': ('varSamp', 'Resample'), 'groupBitOrResample': ('groupBitOr', 'Resample'), 'argMinResample': ('argMin', 'Resample'), 'groupBitmapAndResample': ('groupBitmapAnd', 'Resample'), 'rankCorrResample': ('rankCorr', 'Resample'), 'welchTTestResample': ('welchTTest', 'Resample'), 'skewPopResample': ('skewPop', 'Resample'), 'avgResample': ('avg', 'Resample'), 'uniqHLL12Resample': ('uniqHLL12', 'Resample'), 'quantileGKResample': ('quantileGK', 'Resample'), 'mannWhitneyUTestResample': ('mannWhitneyUTest', 'Resample'), 'largestTriangleThreeBucketsResample': ('largestTriangleThreeBuckets', 'Resample'), 'quantileBFloat16Resample': ('quantileBFloat16', 'Resample'), 'quantileTDigestResample': ('quantileTDigest', 'Resample'), 'minMapResample': ('minMap', 'Resample'), 'quantilesTDigestWeightedResample': ('quantilesTDigestWeighted', 'Resample'), 'uniqExactResample': ('uniqExact', 'Resample'), 'quantilesTimingWeightedResample': ('quantilesTimingWeighted', 'Resample'), 'topKResample': ('topK', 'Resample'), 'quantilesExactHighResample': ('quantilesExactHigh', 'Resample'), 'topKWeightedResample': ('topKWeighted', 'Resample'), 'categoricalInformationValueResample': ('categoricalInformationValue', 'Resample'), 'entropyResample': ('entropy', 'Resample'), 'uniqCombined64Resample': ('uniqCombined64', 'Resample'), 'quantileExactHighResample': ('quantileExactHigh', 'Resample'), 'kolmogorovSmirnovTestResample': ('kolmogorovSmirnovTest', 'Resample'), 'minResample': ('min', 'Resample'), 'groupArrayResample': ('groupArray', 'Resample'), 'quantilesBFloat16Resample': ('quantilesBFloat16', 'Resample'), 'boundingRatioResample': ('boundingRatio', 'Resample'), 'corrResample': ('corr', 'Resample'), 'cramersVResample': ('cramersV', 'Resample'), 'cramersVBiasCorrectedResample': ('cramersVBiasCorrected', 'Resample'), 'quantilesTDigestResample': ('quantilesTDigest', 'Resample'), 'stochasticLinearRegressionResample': ('stochasticLinearRegression', 'Resample'), 'groupUniqArrayResample': ('groupUniqArray', 'Resample'), 'sparkBarResample': ('sparkBar', 'Resample'), 'groupArrayLastResample': ('groupArrayLast', 'Resample'), 'groupArrayMovingAvgResample': ('groupArrayMovingAvg', 'Resample'), 'meanZTestResample': ('meanZTest', 'Resample'), 'sumKahanResample': ('sumKahan', 'Resample'), 'kurtSampResample': ('kurtSamp', 'Resample'), 'uniqResample': ('uniq', 'Resample'), 'groupBitmapResample': ('groupBitmap', 'Resample'), 'sumMapResample': ('sumMap', 'Resample'), 'covarPopResample': ('covarPop', 'Resample'), 'quantilesResample': ('quantiles', 'Resample'), 'quantileTDigestWeightedResample': ('quantileTDigestWeighted', 
'Resample'), 'argMaxResample': ('argMax', 'Resample'), 'studentTTestResample': ('studentTTest', 'Resample'), 'kurtPopResample': ('kurtPop', 'Resample'), 'stddevSampResample': ('stddevSamp', 'Resample'), 'skewSampResample': ('skewSamp', 'Resample'), 'last_valueResample': ('last_value', 'Resample'), 'quantileBFloat16WeightedResample': ('quantileBFloat16Weighted', 'Resample'), 'sumCountResample': ('sumCount', 'Resample'), 'quantilesTimingResample': ('quantilesTiming', 'Resample'), 'sumResample': ('sum', 'Resample'), 'medianResample': ('median', 'Resample'), 'anyResample': ('any', 'Resample'), 'quantileDeterministicResample': ('quantileDeterministic', 'Resample'), 'groupArrayInsertAtResample': ('groupArrayInsertAt', 'Resample'), 'maxIntersectionsResample': ('maxIntersections', 'Resample'), 'quantileExactResample': ('quantileExact', 'Resample'), 'quantileTimingResample': ('quantileTiming', 'Resample'), 'countResample': ('count', 'Resample'), 'quantileTimingWeightedResample': ('quantileTimingWeighted', 'Resample'), 'anyHeavyResample': ('anyHeavy', 'Resample'), 'varPopResample': ('varPop', 'Resample'), 'quantileInterpolatedWeightedResample': ('quantileInterpolatedWeighted', 'Resample'), 'maxResample': ('max', 'Resample'), 'avgWeightedResample': ('avgWeighted', 'Resample'), 'groupBitAndResample': ('groupBitAnd', 'Resample'), 'quantilesExactWeightedResample': ('quantilesExactWeighted', 'Resample'), 'sumWithOverflowResample': ('sumWithOverflow', 'Resample'), 'anyLastResample': ('anyLast', 'Resample'), 'quantilesBFloat16WeightedResample': ('quantilesBFloat16Weighted', 'Resample'), 'quantilesExactLowResample': ('quantilesExactLow', 'Resample'), 'exponentialMovingAverageResample': ('exponentialMovingAverage', 'Resample'), 'uniqCombinedArgMin': ('uniqCombined', 'ArgMin'), 'contingencyArgMin': ('contingency', 'ArgMin'), 'stddevPopArgMin': ('stddevPop', 'ArgMin'), 'uniqThetaArgMin': ('uniqTheta', 'ArgMin'), 'quantileExactWeightedArgMin': ('quantileExactWeighted', 'ArgMin'), 'simpleLinearRegressionArgMin': ('simpleLinearRegression', 'ArgMin'), 'quantilesExactArgMin': ('quantilesExact', 'ArgMin'), 'theilsUArgMin': ('theilsU', 'ArgMin'), 'quantileArgMin': ('quantile', 'ArgMin'), 'quantilesDeterministicArgMin': ('quantilesDeterministic', 'ArgMin'), 'groupBitmapXorArgMin': ('groupBitmapXor', 'ArgMin'), 'quantileExactLowArgMin': ('quantileExactLow', 'ArgMin'), 'maxMapArgMin': ('maxMap', 'ArgMin'), 'deltaSumArgMin': ('deltaSum', 'ArgMin'), 'groupBitmapOrArgMin': ('groupBitmapOr', 'ArgMin'), 'stochasticLogisticRegressionArgMin': ('stochasticLogisticRegression', 'ArgMin'), 'intervalLengthSumArgMin': ('intervalLengthSum', 'ArgMin'), 'groupArraySampleArgMin': ('groupArraySample', 'ArgMin'), 'covarSampArgMin': ('covarSamp', 'ArgMin'), 'groupBitXorArgMin': ('groupBitXor', 'ArgMin'), 'quantilesInterpolatedWeightedArgMin': ('quantilesInterpolatedWeighted', 'ArgMin'), 'first_valueArgMin': ('first_value', 'ArgMin'), 'quantilesGKArgMin': ('quantilesGK', 'ArgMin'), 'deltaSumTimestampArgMin': ('deltaSumTimestamp', 'ArgMin'), 'maxIntersectionsPositionArgMin': ('maxIntersectionsPosition', 'ArgMin'), 'groupArrayMovingSumArgMin': ('groupArrayMovingSum', 'ArgMin'), 'varSampArgMin': ('varSamp', 'ArgMin'), 'groupBitOrArgMin': ('groupBitOr', 'ArgMin'), 'argMinArgMin': ('argMin', 'ArgMin'), 'groupBitmapAndArgMin': ('groupBitmapAnd', 'ArgMin'), 'rankCorrArgMin': ('rankCorr', 'ArgMin'), 'welchTTestArgMin': ('welchTTest', 'ArgMin'), 'skewPopArgMin': ('skewPop', 'ArgMin'), 'avgArgMin': ('avg', 'ArgMin'), 'uniqHLL12ArgMin': ('uniqHLL12', 
'ArgMin'), 'quantileGKArgMin': ('quantileGK', 'ArgMin'), 'mannWhitneyUTestArgMin': ('mannWhitneyUTest', 'ArgMin'), 'largestTriangleThreeBucketsArgMin': ('largestTriangleThreeBuckets', 'ArgMin'), 'quantileBFloat16ArgMin': ('quantileBFloat16', 'ArgMin'), 'quantileTDigestArgMin': ('quantileTDigest', 'ArgMin'), 'minMapArgMin': ('minMap', 'ArgMin'), 'quantilesTDigestWeightedArgMin': ('quantilesTDigestWeighted', 'ArgMin'), 'uniqExactArgMin': ('uniqExact', 'ArgMin'), 'quantilesTimingWeightedArgMin': ('quantilesTimingWeighted', 'ArgMin'), 'topKArgMin': ('topK', 'ArgMin'), 'quantilesExactHighArgMin': ('quantilesExactHigh', 'ArgMin'), 'topKWeightedArgMin': ('topKWeighted', 'ArgMin'), 'categoricalInformationValueArgMin': ('categoricalInformationValue', 'ArgMin'), 'entropyArgMin': ('entropy', 'ArgMin'), 'uniqCombined64ArgMin': ('uniqCombined64', 'ArgMin'), 'quantileExactHighArgMin': ('quantileExactHigh', 'ArgMin'), 'kolmogorovSmirnovTestArgMin': ('kolmogorovSmirnovTest', 'ArgMin'), 'minArgMin': ('min', 'ArgMin'), 'groupArrayArgMin': ('groupArray', 'ArgMin'), 'quantilesBFloat16ArgMin': ('quantilesBFloat16', 'ArgMin'), 'boundingRatioArgMin': ('boundingRatio', 'ArgMin'), 'corrArgMin': ('corr', 'ArgMin'), 'cramersVArgMin': ('cramersV', 'ArgMin'), 'cramersVBiasCorrectedArgMin': ('cramersVBiasCorrected', 'ArgMin'), 'quantilesTDigestArgMin': ('quantilesTDigest', 'ArgMin'), 'stochasticLinearRegressionArgMin': ('stochasticLinearRegression', 'ArgMin'), 'groupUniqArrayArgMin': ('groupUniqArray', 'ArgMin'), 'sparkBarArgMin': ('sparkBar', 'ArgMin'), 'groupArrayLastArgMin': ('groupArrayLast', 'ArgMin'), 'groupArrayMovingAvgArgMin': ('groupArrayMovingAvg', 'ArgMin'), 'meanZTestArgMin': ('meanZTest', 'ArgMin'), 'sumKahanArgMin': ('sumKahan', 'ArgMin'), 'kurtSampArgMin': ('kurtSamp', 'ArgMin'), 'uniqArgMin': ('uniq', 'ArgMin'), 'groupBitmapArgMin': ('groupBitmap', 'ArgMin'), 'sumMapArgMin': ('sumMap', 'ArgMin'), 'covarPopArgMin': ('covarPop', 'ArgMin'), 'quantilesArgMin': ('quantiles', 'ArgMin'), 'quantileTDigestWeightedArgMin': ('quantileTDigestWeighted', 'ArgMin'), 'argMaxArgMin': ('argMax', 'ArgMin'), 'studentTTestArgMin': ('studentTTest', 'ArgMin'), 'kurtPopArgMin': ('kurtPop', 'ArgMin'), 'stddevSampArgMin': ('stddevSamp', 'ArgMin'), 'skewSampArgMin': ('skewSamp', 'ArgMin'), 'last_valueArgMin': ('last_value', 'ArgMin'), 'quantileBFloat16WeightedArgMin': ('quantileBFloat16Weighted', 'ArgMin'), 'sumCountArgMin': ('sumCount', 'ArgMin'), 'quantilesTimingArgMin': ('quantilesTiming', 'ArgMin'), 'sumArgMin': ('sum', 'ArgMin'), 'medianArgMin': ('median', 'ArgMin'), 'anyArgMin': ('any', 'ArgMin'), 'quantileDeterministicArgMin': ('quantileDeterministic', 'ArgMin'), 'groupArrayInsertAtArgMin': ('groupArrayInsertAt', 'ArgMin'), 'maxIntersectionsArgMin': ('maxIntersections', 'ArgMin'), 'quantileExactArgMin': ('quantileExact', 'ArgMin'), 'quantileTimingArgMin': ('quantileTiming', 'ArgMin'), 'countArgMin': ('count', 'ArgMin'), 'quantileTimingWeightedArgMin': ('quantileTimingWeighted', 'ArgMin'), 'anyHeavyArgMin': ('anyHeavy', 'ArgMin'), 'varPopArgMin': ('varPop', 'ArgMin'), 'quantileInterpolatedWeightedArgMin': ('quantileInterpolatedWeighted', 'ArgMin'), 'maxArgMin': ('max', 'ArgMin'), 'avgWeightedArgMin': ('avgWeighted', 'ArgMin'), 'groupBitAndArgMin': ('groupBitAnd', 'ArgMin'), 'quantilesExactWeightedArgMin': ('quantilesExactWeighted', 'ArgMin'), 'sumWithOverflowArgMin': ('sumWithOverflow', 'ArgMin'), 'anyLastArgMin': ('anyLast', 'ArgMin'), 'quantilesBFloat16WeightedArgMin': ('quantilesBFloat16Weighted', 'ArgMin'), 
'quantilesExactLowArgMin': ('quantilesExactLow', 'ArgMin'), 'exponentialMovingAverageArgMin': ('exponentialMovingAverage', 'ArgMin'), 'uniqCombinedArgMax': ('uniqCombined', 'ArgMax'), 'contingencyArgMax': ('contingency', 'ArgMax'), 'stddevPopArgMax': ('stddevPop', 'ArgMax'), 'uniqThetaArgMax': ('uniqTheta', 'ArgMax'), 'quantileExactWeightedArgMax': ('quantileExactWeighted', 'ArgMax'), 'simpleLinearRegressionArgMax': ('simpleLinearRegression', 'ArgMax'), 'quantilesExactArgMax': ('quantilesExact', 'ArgMax'), 'theilsUArgMax': ('theilsU', 'ArgMax'), 'quantileArgMax': ('quantile', 'ArgMax'), 'quantilesDeterministicArgMax': ('quantilesDeterministic', 'ArgMax'), 'groupBitmapXorArgMax': ('groupBitmapXor', 'ArgMax'), 'quantileExactLowArgMax': ('quantileExactLow', 'ArgMax'), 'maxMapArgMax': ('maxMap', 'ArgMax'), 'deltaSumArgMax': ('deltaSum', 'ArgMax'), 'groupBitmapOrArgMax': ('groupBitmapOr', 'ArgMax'), 'stochasticLogisticRegressionArgMax': ('stochasticLogisticRegression', 'ArgMax'), 'intervalLengthSumArgMax': ('intervalLengthSum', 'ArgMax'), 'groupArraySampleArgMax': ('groupArraySample', 'ArgMax'), 'covarSampArgMax': ('covarSamp', 'ArgMax'), 'groupBitXorArgMax': ('groupBitXor', 'ArgMax'), 'quantilesInterpolatedWeightedArgMax': ('quantilesInterpolatedWeighted', 'ArgMax'), 'first_valueArgMax': ('first_value', 'ArgMax'), 'quantilesGKArgMax': ('quantilesGK', 'ArgMax'), 'deltaSumTimestampArgMax': ('deltaSumTimestamp', 'ArgMax'), 'maxIntersectionsPositionArgMax': ('maxIntersectionsPosition', 'ArgMax'), 'groupArrayMovingSumArgMax': ('groupArrayMovingSum', 'ArgMax'), 'varSampArgMax': ('varSamp', 'ArgMax'), 'groupBitOrArgMax': ('groupBitOr', 'ArgMax'), 'argMinArgMax': ('argMin', 'ArgMax'), 'groupBitmapAndArgMax': ('groupBitmapAnd', 'ArgMax'), 'rankCorrArgMax': ('rankCorr', 'ArgMax'), 'welchTTestArgMax': ('welchTTest', 'ArgMax'), 'skewPopArgMax': ('skewPop', 'ArgMax'), 'avgArgMax': ('avg', 'ArgMax'), 'uniqHLL12ArgMax': ('uniqHLL12', 'ArgMax'), 'quantileGKArgMax': ('quantileGK', 'ArgMax'), 'mannWhitneyUTestArgMax': ('mannWhitneyUTest', 'ArgMax'), 'largestTriangleThreeBucketsArgMax': ('largestTriangleThreeBuckets', 'ArgMax'), 'quantileBFloat16ArgMax': ('quantileBFloat16', 'ArgMax'), 'quantileTDigestArgMax': ('quantileTDigest', 'ArgMax'), 'minMapArgMax': ('minMap', 'ArgMax'), 'quantilesTDigestWeightedArgMax': ('quantilesTDigestWeighted', 'ArgMax'), 'uniqExactArgMax': ('uniqExact', 'ArgMax'), 'quantilesTimingWeightedArgMax': ('quantilesTimingWeighted', 'ArgMax'), 'topKArgMax': ('topK', 'ArgMax'), 'quantilesExactHighArgMax': ('quantilesExactHigh', 'ArgMax'), 'topKWeightedArgMax': ('topKWeighted', 'ArgMax'), 'categoricalInformationValueArgMax': ('categoricalInformationValue', 'ArgMax'), 'entropyArgMax': ('entropy', 'ArgMax'), 'uniqCombined64ArgMax': ('uniqCombined64', 'ArgMax'), 'quantileExactHighArgMax': ('quantileExactHigh', 'ArgMax'), 'kolmogorovSmirnovTestArgMax': ('kolmogorovSmirnovTest', 'ArgMax'), 'minArgMax': ('min', 'ArgMax'), 'groupArrayArgMax': ('groupArray', 'ArgMax'), 'quantilesBFloat16ArgMax': ('quantilesBFloat16', 'ArgMax'), 'boundingRatioArgMax': ('boundingRatio', 'ArgMax'), 'corrArgMax': ('corr', 'ArgMax'), 'cramersVArgMax': ('cramersV', 'ArgMax'), 'cramersVBiasCorrectedArgMax': ('cramersVBiasCorrected', 'ArgMax'), 'quantilesTDigestArgMax': ('quantilesTDigest', 'ArgMax'), 'stochasticLinearRegressionArgMax': ('stochasticLinearRegression', 'ArgMax'), 'groupUniqArrayArgMax': ('groupUniqArray', 'ArgMax'), 'sparkBarArgMax': ('sparkBar', 'ArgMax'), 'groupArrayLastArgMax': ('groupArrayLast', 'ArgMax'), 
'groupArrayMovingAvgArgMax': ('groupArrayMovingAvg', 'ArgMax'), 'meanZTestArgMax': ('meanZTest', 'ArgMax'), 'sumKahanArgMax': ('sumKahan', 'ArgMax'), 'kurtSampArgMax': ('kurtSamp', 'ArgMax'), 'uniqArgMax': ('uniq', 'ArgMax'), 'groupBitmapArgMax': ('groupBitmap', 'ArgMax'), 'sumMapArgMax': ('sumMap', 'ArgMax'), 'covarPopArgMax': ('covarPop', 'ArgMax'), 'quantilesArgMax': ('quantiles', 'ArgMax'), 'quantileTDigestWeightedArgMax': ('quantileTDigestWeighted', 'ArgMax'), 'argMaxArgMax': ('argMax', 'ArgMax'), 'studentTTestArgMax': ('studentTTest', 'ArgMax'), 'kurtPopArgMax': ('kurtPop', 'ArgMax'), 'stddevSampArgMax': ('stddevSamp', 'ArgMax'), 'skewSampArgMax': ('skewSamp', 'ArgMax'), 'last_valueArgMax': ('last_value', 'ArgMax'), 'quantileBFloat16WeightedArgMax': ('quantileBFloat16Weighted', 'ArgMax'), 'sumCountArgMax': ('sumCount', 'ArgMax'), 'quantilesTimingArgMax': ('quantilesTiming', 'ArgMax'), 'sumArgMax': ('sum', 'ArgMax'), 'medianArgMax': ('median', 'ArgMax'), 'anyArgMax': ('any', 'ArgMax'), 'quantileDeterministicArgMax': ('quantileDeterministic', 'ArgMax'), 'groupArrayInsertAtArgMax': ('groupArrayInsertAt', 'ArgMax'), 'maxIntersectionsArgMax': ('maxIntersections', 'ArgMax'), 'quantileExactArgMax': ('quantileExact', 'ArgMax'), 'quantileTimingArgMax': ('quantileTiming', 'ArgMax'), 'countArgMax': ('count', 'ArgMax'), 'quantileTimingWeightedArgMax': ('quantileTimingWeighted', 'ArgMax'), 'anyHeavyArgMax': ('anyHeavy', 'ArgMax'), 'varPopArgMax': ('varPop', 'ArgMax'), 'quantileInterpolatedWeightedArgMax': ('quantileInterpolatedWeighted', 'ArgMax'), 'maxArgMax': ('max', 'ArgMax'), 'avgWeightedArgMax': ('avgWeighted', 'ArgMax'), 'groupBitAndArgMax': ('groupBitAnd', 'ArgMax'), 'quantilesExactWeightedArgMax': ('quantilesExactWeighted', 'ArgMax'), 'sumWithOverflowArgMax': ('sumWithOverflow', 'ArgMax'), 'anyLastArgMax': ('anyLast', 'ArgMax'), 'quantilesBFloat16WeightedArgMax': ('quantilesBFloat16Weighted', 'ArgMax'), 'quantilesExactLowArgMax': ('quantilesExactLow', 'ArgMax'), 'exponentialMovingAverageArgMax': ('exponentialMovingAverage', 'ArgMax'), 'uniqCombined': ('uniqCombined', ''), 'contingency': ('contingency', ''), 'stddevPop': ('stddevPop', ''), 'uniqTheta': ('uniqTheta', ''), 'quantileExactWeighted': ('quantileExactWeighted', ''), 'simpleLinearRegression': ('simpleLinearRegression', ''), 'quantilesExact': ('quantilesExact', ''), 'theilsU': ('theilsU', ''), 'quantile': ('quantile', ''), 'quantilesDeterministic': ('quantilesDeterministic', ''), 'groupBitmapXor': ('groupBitmapXor', ''), 'quantileExactLow': ('quantileExactLow', ''), 'deltaSum': ('deltaSum', ''), 'groupBitmapOr': ('groupBitmapOr', ''), 'stochasticLogisticRegression': ('stochasticLogisticRegression', ''), 'intervalLengthSum': ('intervalLengthSum', ''), 'groupArraySample': ('groupArraySample', ''), 'covarSamp': ('covarSamp', ''), 'groupBitXor': ('groupBitXor', ''), 'quantilesInterpolatedWeighted': ('quantilesInterpolatedWeighted', ''), 'first_value': ('first_value', ''), 'quantilesGK': ('quantilesGK', ''), 'deltaSumTimestamp': ('deltaSumTimestamp', ''), 'maxIntersectionsPosition': ('maxIntersectionsPosition', ''), 'groupArrayMovingSum': ('groupArrayMovingSum', ''), 'varSamp': ('varSamp', ''), 'groupBitOr': ('groupBitOr', ''), 'argMin': ('argMin', ''), 'groupBitmapAnd': ('groupBitmapAnd', ''), 'rankCorr': ('rankCorr', ''), 'welchTTest': ('welchTTest', ''), 'skewPop': ('skewPop', ''), 'avg': ('avg', ''), 'uniqHLL12': ('uniqHLL12', ''), 'quantileGK': ('quantileGK', ''), 'mannWhitneyUTest': ('mannWhitneyUTest', ''), 
'largestTriangleThreeBuckets': ('largestTriangleThreeBuckets', ''), 'quantileBFloat16': ('quantileBFloat16', ''), 'quantileTDigest': ('quantileTDigest', ''), 'quantilesTDigestWeighted': ('quantilesTDigestWeighted', ''), 'uniqExact': ('uniqExact', ''), 'quantilesTimingWeighted': ('quantilesTimingWeighted', ''), 'topK': ('topK', ''), 'quantilesExactHigh': ('quantilesExactHigh', ''), 'topKWeighted': ('topKWeighted', ''), 'categoricalInformationValue': ('categoricalInformationValue', ''), 'entropy': ('entropy', ''), 'uniqCombined64': ('uniqCombined64', ''), 'quantileExactHigh': ('quantileExactHigh', ''), 'kolmogorovSmirnovTest': ('kolmogorovSmirnovTest', ''), 'min': ('min', ''), 'groupArray': ('groupArray', ''), 'quantilesBFloat16': ('quantilesBFloat16', ''), 'boundingRatio': ('boundingRatio', ''), 'corr': ('corr', ''), 'cramersV': ('cramersV', ''), 'cramersVBiasCorrected': ('cramersVBiasCorrected', ''), 'quantilesTDigest': ('quantilesTDigest', ''), 'stochasticLinearRegression': ('stochasticLinearRegression', ''), 'groupUniqArray': ('groupUniqArray', ''), 'sparkBar': ('sparkBar', ''), 'groupArrayLast': ('groupArrayLast', ''), 'groupArrayMovingAvg': ('groupArrayMovingAvg', ''), 'meanZTest': ('meanZTest', ''), 'sumKahan': ('sumKahan', ''), 'kurtSamp': ('kurtSamp', ''), 'uniq': ('uniq', ''), 'groupBitmap': ('groupBitmap', ''), 'covarPop': ('covarPop', ''), 'quantiles': ('quantiles', ''), 'quantileTDigestWeighted': ('quantileTDigestWeighted', ''), 'argMax': ('argMax', ''), 'studentTTest': ('studentTTest', ''), 'kurtPop': ('kurtPop', ''), 'stddevSamp': ('stddevSamp', ''), 'skewSamp': ('skewSamp', ''), 'last_value': ('last_value', ''), 'quantileBFloat16Weighted': ('quantileBFloat16Weighted', ''), 'sumCount': ('sumCount', ''), 'quantilesTiming': ('quantilesTiming', ''), 'sum': ('sum', ''), 'median': ('median', ''), 'any': ('any', ''), 'quantileDeterministic': ('quantileDeterministic', ''), 'groupArrayInsertAt': ('groupArrayInsertAt', ''), 'maxIntersections': ('maxIntersections', ''), 'quantileExact': ('quantileExact', ''), 'quantileTiming': ('quantileTiming', ''), 'count': ('count', ''), 'quantileTimingWeighted': ('quantileTimingWeighted', ''), 'anyHeavy': ('anyHeavy', ''), 'varPop': ('varPop', ''), 'quantileInterpolatedWeighted': ('quantileInterpolatedWeighted', ''), 'max': ('max', ''), 'avgWeighted': ('avgWeighted', ''), 'groupBitAnd': ('groupBitAnd', ''), 'quantilesExactWeighted': ('quantilesExactWeighted', ''), 'sumWithOverflow': ('sumWithOverflow', ''), 'anyLast': ('anyLast', ''), 'quantilesBFloat16Weighted': ('quantilesBFloat16Weighted', ''), 'quantilesExactLow': ('quantilesExactLow', ''), 'exponentialMovingAverage': ('exponentialMovingAverage', '')}
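The dictionary above is the ClickHouse dialect's aggregate-combinator table as rendered into these docs: every function name carrying a combinator suffix ('OrDefault', 'OrNull', 'Resample', 'ArgMin', 'ArgMax', or no suffix at all) is keyed to a (base function, suffix) pair, which is what lets the dialect split a name such as quantileExactOrDefault back into quantileExact plus OrDefault. A minimal sketch of how such a table can be produced as a cross product is shown below; the helper name and the trimmed-down function list are illustrative stand-ins, not identifiers taken from sqlglot's source.

from typing import Dict, Iterable, Tuple

# Illustrative subset only -- the real table covers every ClickHouse aggregate.
BASE_FUNCS: Tuple[str, ...] = ("quantileExact", "sum", "avg", "count")
COMBINATORS: Tuple[str, ...] = ("OrDefault", "OrNull", "Resample", "ArgMin", "ArgMax")

def build_agg_func_mapping(
    funcs: Iterable[str], suffixes: Iterable[str]
) -> Dict[str, Tuple[str, str]]:
    """Map e.g. 'quantileExactOrDefault' -> ('quantileExact', 'OrDefault')."""
    # The empty suffix keeps each plain function name as its own entry.
    return {f"{f}{s}": (f, s) for s in (*suffixes, "") for f in funcs}

mapping = build_agg_func_mapping(BASE_FUNCS, COMBINATORS)
assert mapping["quantileExactOrDefault"] == ("quantileExact", "OrDefault")
assert mapping["count"] == ("count", "")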
    @@ -2800,7 +2800,7 @@ Default: 3
    FUNCTIONS_WITH_ALIASED_ARGS = -{'TUPLE', 'STRUCT'} +{'STRUCT', 'TUPLE'}
    @@ -2865,7 +2865,7 @@ Default: 3
    JOIN_KINDS = - {<TokenType.ARRAY: 'ARRAY'>, <TokenType.ASOF: 'ASOF'>, <TokenType.ANTI: 'ANTI'>, <TokenType.INNER: 'INNER'>, <TokenType.OUTER: 'OUTER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ANY: 'ANY'>, <TokenType.CROSS: 'CROSS'>} + {<TokenType.ANY: 'ANY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CROSS: 'CROSS'>, <TokenType.OUTER: 'OUTER'>, <TokenType.ASOF: 'ASOF'>, <TokenType.SEMI: 'SEMI'>, <TokenType.INNER: 'INNER'>}
    @@ -2878,7 +2878,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 
'TIMESTAMP_NS'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 
'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, 
<TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
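The TABLE_ALIAS_TOKENS hunk above, like the JOIN_KINDS hunk before it, only re-serializes a set literal, but the membership is the informative part: the ClickHouse alias set omits the join-kind keywords listed in JOIN_KINDS (ANY, ANTI, ASOF, ARRAY, SEMI), so a query like FROM t ANY LEFT JOIN u is not read as table t aliased ANY. A hedged sketch of that narrowing pattern follows; the subclass name and the exact set of tokens subtracted are assumptions for illustration, not copied from sqlglot.

from sqlglot import parser
from sqlglot.tokens import TokenType

class IllustrativeClickHouseParser(parser.Parser):
    # Remove ClickHouse's join-kind keywords from the base alias set so that
    # "FROM t ANY LEFT JOIN u ..." is not parsed as table t aliased AS ANY.
    TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {
        TokenType.ANY,
        TokenType.ANTI,
        TokenType.ASOF,
        TokenType.ARRAY,  # ARRAY JOIN
        TokenType.SEMI,
    }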
    @@ -3015,6 +3015,7 @@ Default: 3
    STRING_ALIASES
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
+    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -3406,7 +3407,7 @@ Default: True
    @@ -3505,7 +3506,7 @@ Default: True
    ON_CLUSTER_TARGETS = -{'FUNCTION', 'VIEW', 'DATABASE', 'INDEX', 'NAMED COLLECTION', 'DICTIONARY', 'TABLE'} +{'INDEX', 'DATABASE', 'FUNCTION', 'DICTIONARY', 'TABLE', 'NAMED COLLECTION', 'VIEW'}
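ON_CLUSTER_TARGETS enumerates the DDL object kinds (DATABASE, TABLE, VIEW, DICTIONARY, FUNCTION, INDEX, NAMED COLLECTION) for which the ClickHouse dialect honours an ON CLUSTER clause; only the set's serialization order changed in this hunk. A minimal usage sketch follows, assuming the dialect round-trips ON CLUSTER on CREATE TABLE; the table name, cluster name, and exact output formatting are illustrative.

import sqlglot

# ON CLUSTER on a CREATE TABLE, round-tripped through the ClickHouse dialect.
ddl = "CREATE TABLE db.events ON CLUSTER my_cluster (id UInt64) ENGINE=MergeTree ORDER BY id"
expr = sqlglot.parse_one(ddl, read="clickhouse")
print(expr.sql(dialect="clickhouse"))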
    diff --git a/docs/sqlglot/dialects/databricks.html b/docs/sqlglot/dialects/databricks.html index 675011f..102a21a 100644 --- a/docs/sqlglot/dialects/databricks.html +++ b/docs/sqlglot/dialects/databricks.html @@ -892,7 +892,8 @@ Default: 3
    diff --git a/docs/sqlglot/dialects/doris.html b/docs/sqlglot/dialects/doris.html index 72003b3..b318e78 100644 --- a/docs/sqlglot/dialects/doris.html +++ b/docs/sqlglot/dialects/doris.html @@ -714,7 +714,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, 
<TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TRUE: 'TRUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -854,6 +854,7 @@ Default: 3
    ENUM_TYPE_TOKENS
    LOG_DEFAULTS_TO_LN
    STRING_ALIASES
+    VALUES_FOLLOWED_BY_PAREN
    diff --git a/docs/sqlglot/dialects/drill.html b/docs/sqlglot/dialects/drill.html index 28cd59d..897b92f 100644 --- a/docs/sqlglot/dialects/drill.html +++ b/docs/sqlglot/dialects/drill.html @@ -1122,7 +1122,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -1241,6 +1241,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
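The single member added to this inherited-members list is the new VALUES_FOLLOWED_BY_PAREN parser flag introduced in this release. A minimal sketch of what it appears to control, assuming the base-parser default of True (the VALUES keyword is only treated as a row constructor when it is immediately followed by an opening parenthesis; otherwise it can be consumed as an ordinary identifier). The dialect choice and expected results below are illustrative assumptions, not taken from the patch:

import sqlglot
from sqlglot import exp

# With the base default VALUES_FOLLOWED_BY_PAREN = True, a bare "values" that is
# not followed by "(" is parsed as an ordinary table name, not as a row constructor.
# (Illustrative sketch; the expected results are assumptions based on the flag name.)
plain = sqlglot.parse_one("SELECT * FROM values", read="duckdb")
print(plain.find(exp.Values) is None)        # expected: True  -> "values" is a table

rows = sqlglot.parse_one("SELECT * FROM (VALUES (1), (2)) AS t(a)", read="duckdb")
print(rows.find(exp.Values) is not None)     # expected: True  -> a real VALUES clause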
    diff --git a/docs/sqlglot/dialects/duckdb.html b/docs/sqlglot/dialects/duckdb.html index 0095c1f..a19f8ce 100644 --- a/docs/sqlglot/dialects/duckdb.html +++ b/docs/sqlglot/dialects/duckdb.html @@ -1895,7 +1895,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, 
<TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, 
<TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -2026,6 +2026,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -2528,7 +2529,7 @@ Default: True diff --git a/docs/sqlglot/dialects/hive.html b/docs/sqlglot/dialects/hive.html index 73e8d2a..d3fc6cc 100644 --- a/docs/sqlglot/dialects/hive.html +++ b/docs/sqlglot/dialects/hive.html @@ -102,6 +102,9 @@
  • STRICT_CAST
  • +
  • + VALUES_FOLLOWED_BY_PAREN +
  • FUNCTIONS
  • @@ -552,373 +555,374 @@
    266 class Parser(parser.Parser): 267 LOG_DEFAULTS_TO_LN = True 268 STRICT_CAST = False -269 -270 FUNCTIONS = { -271 **parser.Parser.FUNCTIONS, -272 "BASE64": exp.ToBase64.from_arg_list, -273 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, -274 "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list, -275 "DATE_ADD": lambda args: exp.TsOrDsAdd( -276 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") -277 ), -278 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( -279 [ -280 exp.TimeStrToTime(this=seq_get(args, 0)), -281 seq_get(args, 1), -282 ] -283 ), -284 "DATE_SUB": lambda args: exp.TsOrDsAdd( -285 this=seq_get(args, 0), -286 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), -287 unit=exp.Literal.string("DAY"), -288 ), -289 "DATEDIFF": lambda args: exp.DateDiff( -290 this=exp.TsOrDsToDate(this=seq_get(args, 0)), -291 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), -292 ), -293 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), -294 "FIRST": _parse_ignore_nulls(exp.First), -295 "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue), -296 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), -297 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, -298 "LAST": _parse_ignore_nulls(exp.Last), -299 "LAST_VALUE": _parse_ignore_nulls(exp.LastValue), -300 "LOCATE": locate_to_strposition, -301 "MAP": parse_var_map, -302 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), -303 "PERCENTILE": exp.Quantile.from_arg_list, -304 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, -305 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -306 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) -307 ), -308 "SIZE": exp.ArraySize.from_arg_list, -309 "SPLIT": exp.RegexpSplit.from_arg_list, -310 "STR_TO_MAP": lambda args: exp.StrToMap( -311 this=seq_get(args, 0), -312 pair_delim=seq_get(args, 1) or exp.Literal.string(","), -313 key_value_delim=seq_get(args, 2) or exp.Literal.string(":"), -314 ), -315 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), -316 "TO_JSON": exp.JSONFormat.from_arg_list, -317 "UNBASE64": exp.FromBase64.from_arg_list, -318 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), -319 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), -320 } -321 -322 NO_PAREN_FUNCTION_PARSERS = { -323 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, -324 "TRANSFORM": lambda self: self._parse_transform(), -325 } -326 -327 PROPERTY_PARSERS = { -328 **parser.Parser.PROPERTY_PARSERS, -329 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( -330 expressions=self._parse_wrapped_csv(self._parse_property) -331 ), -332 } -333 -334 def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]: -335 if not self._match(TokenType.L_PAREN, advance=False): -336 self._retreat(self._index - 1) -337 return None -338 -339 args = self._parse_wrapped_csv(self._parse_lambda) -340 row_format_before = self._parse_row_format(match_row=True) -341 -342 record_writer = None -343 if self._match_text_seq("RECORDWRITER"): -344 record_writer = self._parse_string() -345 -346 if not self._match(TokenType.USING): -347 return exp.Transform.from_arg_list(args) -348 -349 command_script = self._parse_string() -350 -351 self._match(TokenType.ALIAS) -352 schema = self._parse_schema() -353 -354 row_format_after = self._parse_row_format(match_row=True) -355 record_reader = None -356 if self._match_text_seq("RECORDREADER"): -357 
record_reader = self._parse_string() -358 -359 return self.expression( -360 exp.QueryTransform, -361 expressions=args, -362 command_script=command_script, -363 schema=schema, -364 row_format_before=row_format_before, -365 record_writer=record_writer, -366 row_format_after=row_format_after, -367 record_reader=record_reader, -368 ) -369 -370 def _parse_types( -371 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True -372 ) -> t.Optional[exp.Expression]: -373 """ -374 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to -375 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: -376 -377 spark-sql (default)> select cast(1234 as varchar(2)); -378 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support -379 char/varchar type and simply treats them as string type. Please use string type -380 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString -381 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier -382 -383 1234 -384 Time taken: 4.265 seconds, Fetched 1 row(s) -385 -386 This shows that Spark doesn't truncate the value into '12', which is inconsistent with -387 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. -388 -389 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html -390 """ -391 this = super()._parse_types( -392 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers -393 ) -394 -395 if this and not schema: -396 return this.transform( -397 lambda node: ( -398 node.replace(exp.DataType.build("text")) -399 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") -400 else node -401 ), -402 copy=False, -403 ) -404 -405 return this -406 -407 def _parse_partition_and_order( -408 self, -409 ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]: -410 return ( -411 ( -412 self._parse_csv(self._parse_conjunction) -413 if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY}) -414 else [] -415 ), -416 super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)), -417 ) -418 -419 class Generator(generator.Generator): -420 LIMIT_FETCH = "LIMIT" -421 TABLESAMPLE_WITH_METHOD = False -422 JOIN_HINTS = False -423 TABLE_HINTS = False -424 QUERY_HINTS = False -425 INDEX_ON = "ON TABLE" -426 EXTRACT_ALLOWS_QUOTES = False -427 NVL2_SUPPORTED = False -428 LAST_DAY_SUPPORTS_DATE_PART = False -429 JSON_PATH_SINGLE_QUOTE_ESCAPE = True -430 -431 EXPRESSIONS_WITHOUT_NESTED_CTES = { -432 exp.Insert, -433 exp.Select, -434 exp.Subquery, -435 exp.Union, -436 } -437 -438 SUPPORTED_JSON_PATH_PARTS = { -439 exp.JSONPathKey, -440 exp.JSONPathRoot, -441 exp.JSONPathSubscript, -442 exp.JSONPathWildcard, -443 } -444 -445 TYPE_MAPPING = { -446 **generator.Generator.TYPE_MAPPING, -447 exp.DataType.Type.BIT: "BOOLEAN", -448 exp.DataType.Type.DATETIME: "TIMESTAMP", -449 exp.DataType.Type.TEXT: "STRING", -450 exp.DataType.Type.TIME: "TIMESTAMP", -451 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -452 exp.DataType.Type.VARBINARY: "BINARY", -453 } -454 -455 TRANSFORMS = { -456 **generator.Generator.TRANSFORMS, -457 exp.Group: transforms.preprocess([transforms.unalias_group]), -458 exp.Select: transforms.preprocess( -459 [ -460 transforms.eliminate_qualify, -461 transforms.eliminate_distinct_on, -462 transforms.unnest_to_explode, -463 ] -464 ), -465 exp.Property: _property_sql, -466 exp.AnyValue: 
rename_func("FIRST"), -467 exp.ApproxDistinct: approx_count_distinct_sql, -468 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), -469 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), -470 exp.ArrayConcat: rename_func("CONCAT"), -471 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), -472 exp.ArraySize: rename_func("SIZE"), -473 exp.ArraySort: _array_sort_sql, -474 exp.With: no_recursive_cte_sql, -475 exp.DateAdd: _add_date_sql, -476 exp.DateDiff: _date_diff_sql, -477 exp.DateStrToDate: datestrtodate_sql, -478 exp.DateSub: _add_date_sql, -479 exp.DateToDi: lambda self, -480 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", -481 exp.DiToDate: lambda self, -482 e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", -483 exp.FileFormatProperty: lambda self, -484 e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", -485 exp.FromBase64: rename_func("UNBASE64"), -486 exp.If: if_sql(), -487 exp.ILike: no_ilike_sql, -488 exp.IsNan: rename_func("ISNAN"), -489 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), -490 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), -491 exp.JSONFormat: _json_format_sql, -492 exp.Left: left_to_substring_sql, -493 exp.Map: var_map_sql, -494 exp.Max: max_or_greatest, -495 exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)), -496 exp.Min: min_or_least, -497 exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression), -498 exp.NotNullColumnConstraint: lambda self, e: ( -499 "" if e.args.get("allow_null") else "NOT NULL" -500 ), -501 exp.VarMap: var_map_sql, -502 exp.Create: preprocess( -503 [ -504 remove_unique_constraints, -505 ctas_with_tmp_tables_to_create_tmp_view, -506 move_schema_columns_to_partitioned_by, -507 ] -508 ), -509 exp.Quantile: rename_func("PERCENTILE"), -510 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), -511 exp.RegexpExtract: regexp_extract_sql, -512 exp.RegexpReplace: regexp_replace_sql, -513 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), -514 exp.RegexpSplit: rename_func("SPLIT"), -515 exp.Right: right_to_substring_sql, -516 exp.SafeDivide: no_safe_divide_sql, -517 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), -518 exp.ArrayUniqueAgg: rename_func("COLLECT_SET"), -519 exp.Split: lambda self, -520 e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", -521 exp.StrPosition: strposition_to_locate_sql, -522 exp.StrToDate: _str_to_date_sql, -523 exp.StrToTime: _str_to_time_sql, -524 exp.StrToUnix: _str_to_unix_sql, -525 exp.StructExtract: struct_extract_sql, -526 exp.TimeStrToDate: rename_func("TO_DATE"), -527 exp.TimeStrToTime: timestrtotime_sql, -528 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -529 exp.TimeToStr: _time_to_str, -530 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), -531 exp.ToBase64: rename_func("BASE64"), -532 exp.TsOrDiToDi: lambda self, -533 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", -534 exp.TsOrDsAdd: _add_date_sql, -535 exp.TsOrDsDiff: _date_diff_sql, -536 exp.TsOrDsToDate: _to_date_sql, -537 exp.TryCast: no_trycast_sql, -538 exp.UnixToStr: lambda self, e: self.func( -539 "FROM_UNIXTIME", e.this, time_format("hive")(self, e) -540 ), -541 exp.UnixToTime: rename_func("FROM_UNIXTIME"), -542 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), -543 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", -544 exp.SerdeProperties: 
lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), -545 exp.NumberToStr: rename_func("FORMAT_NUMBER"), -546 exp.National: lambda self, e: self.national_sql(e, prefix=""), -547 exp.ClusteredColumnConstraint: lambda self, -548 e: f"({self.expressions(e, 'this', indent=False)})", -549 exp.NonClusteredColumnConstraint: lambda self, -550 e: f"({self.expressions(e, 'this', indent=False)})", -551 exp.NotForReplicationColumnConstraint: lambda self, e: "", -552 exp.OnProperty: lambda self, e: "", -553 exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY", -554 } -555 -556 PROPERTIES_LOCATION = { -557 **generator.Generator.PROPERTIES_LOCATION, -558 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, -559 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -560 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -561 exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED, -562 } -563 -564 def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str: -565 if isinstance(expression.this, exp.JSONPathWildcard): -566 self.unsupported("Unsupported wildcard in JSONPathKey expression") -567 return "" -568 -569 return super()._jsonpathkey_sql(expression) -570 -571 def parameter_sql(self, expression: exp.Parameter) -> str: -572 this = self.sql(expression, "this") -573 expression_sql = self.sql(expression, "expression") -574 -575 parent = expression.parent -576 this = f"{this}:{expression_sql}" if expression_sql else this -577 -578 if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem): -579 # We need to produce SET key = value instead of SET ${key} = value -580 return this -581 -582 return f"${{{this}}}" -583 -584 def schema_sql(self, expression: exp.Schema) -> str: -585 for ordered in expression.find_all(exp.Ordered): -586 if ordered.args.get("desc") is False: -587 ordered.set("desc", None) -588 -589 return super().schema_sql(expression) -590 -591 def constraint_sql(self, expression: exp.Constraint) -> str: -592 for prop in list(expression.find_all(exp.Properties)): -593 prop.pop() -594 -595 this = self.sql(expression, "this") -596 expressions = self.expressions(expression, sep=" ", flat=True) -597 return f"CONSTRAINT {this} {expressions}" -598 -599 def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str: -600 serde_props = self.sql(expression, "serde_properties") -601 serde_props = f" {serde_props}" if serde_props else "" -602 return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}" -603 -604 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: -605 return self.func( -606 "COLLECT_LIST", -607 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, -608 ) -609 -610 def with_properties(self, properties: exp.Properties) -> str: -611 return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) -612 -613 def datatype_sql(self, expression: exp.DataType) -> str: -614 if ( -615 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) -616 and not expression.expressions -617 ): -618 expression = exp.DataType.build("text") -619 elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions: -620 expression.set("this", exp.DataType.Type.VARCHAR) -621 elif expression.this in exp.DataType.TEMPORAL_TYPES: -622 expression = exp.DataType.build(expression.this) -623 elif expression.is_type("float"): -624 size_expression = expression.find(exp.DataTypeParam) -625 if size_expression: -626 size = int(size_expression.name) -627 expression = ( 
-628 exp.DataType.build("float") if size <= 32 else exp.DataType.build("double") -629 ) -630 -631 return super().datatype_sql(expression) -632 -633 def version_sql(self, expression: exp.Version) -> str: -634 sql = super().version_sql(expression) -635 return sql.replace("FOR ", "", 1) +269 VALUES_FOLLOWED_BY_PAREN = False +270 +271 FUNCTIONS = { +272 **parser.Parser.FUNCTIONS, +273 "BASE64": exp.ToBase64.from_arg_list, +274 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, +275 "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list, +276 "DATE_ADD": lambda args: exp.TsOrDsAdd( +277 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") +278 ), +279 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( +280 [ +281 exp.TimeStrToTime(this=seq_get(args, 0)), +282 seq_get(args, 1), +283 ] +284 ), +285 "DATE_SUB": lambda args: exp.TsOrDsAdd( +286 this=seq_get(args, 0), +287 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), +288 unit=exp.Literal.string("DAY"), +289 ), +290 "DATEDIFF": lambda args: exp.DateDiff( +291 this=exp.TsOrDsToDate(this=seq_get(args, 0)), +292 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), +293 ), +294 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), +295 "FIRST": _parse_ignore_nulls(exp.First), +296 "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue), +297 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), +298 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, +299 "LAST": _parse_ignore_nulls(exp.Last), +300 "LAST_VALUE": _parse_ignore_nulls(exp.LastValue), +301 "LOCATE": locate_to_strposition, +302 "MAP": parse_var_map, +303 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), +304 "PERCENTILE": exp.Quantile.from_arg_list, +305 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, +306 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +307 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) +308 ), +309 "SIZE": exp.ArraySize.from_arg_list, +310 "SPLIT": exp.RegexpSplit.from_arg_list, +311 "STR_TO_MAP": lambda args: exp.StrToMap( +312 this=seq_get(args, 0), +313 pair_delim=seq_get(args, 1) or exp.Literal.string(","), +314 key_value_delim=seq_get(args, 2) or exp.Literal.string(":"), +315 ), +316 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), +317 "TO_JSON": exp.JSONFormat.from_arg_list, +318 "UNBASE64": exp.FromBase64.from_arg_list, +319 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), +320 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), +321 } +322 +323 NO_PAREN_FUNCTION_PARSERS = { +324 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, +325 "TRANSFORM": lambda self: self._parse_transform(), +326 } +327 +328 PROPERTY_PARSERS = { +329 **parser.Parser.PROPERTY_PARSERS, +330 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( +331 expressions=self._parse_wrapped_csv(self._parse_property) +332 ), +333 } +334 +335 def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]: +336 if not self._match(TokenType.L_PAREN, advance=False): +337 self._retreat(self._index - 1) +338 return None +339 +340 args = self._parse_wrapped_csv(self._parse_lambda) +341 row_format_before = self._parse_row_format(match_row=True) +342 +343 record_writer = None +344 if self._match_text_seq("RECORDWRITER"): +345 record_writer = self._parse_string() +346 +347 if not self._match(TokenType.USING): +348 return exp.Transform.from_arg_list(args) +349 +350 command_script = 
self._parse_string() +351 +352 self._match(TokenType.ALIAS) +353 schema = self._parse_schema() +354 +355 row_format_after = self._parse_row_format(match_row=True) +356 record_reader = None +357 if self._match_text_seq("RECORDREADER"): +358 record_reader = self._parse_string() +359 +360 return self.expression( +361 exp.QueryTransform, +362 expressions=args, +363 command_script=command_script, +364 schema=schema, +365 row_format_before=row_format_before, +366 record_writer=record_writer, +367 row_format_after=row_format_after, +368 record_reader=record_reader, +369 ) +370 +371 def _parse_types( +372 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True +373 ) -> t.Optional[exp.Expression]: +374 """ +375 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to +376 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: +377 +378 spark-sql (default)> select cast(1234 as varchar(2)); +379 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support +380 char/varchar type and simply treats them as string type. Please use string type +381 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString +382 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier +383 +384 1234 +385 Time taken: 4.265 seconds, Fetched 1 row(s) +386 +387 This shows that Spark doesn't truncate the value into '12', which is inconsistent with +388 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. +389 +390 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html +391 """ +392 this = super()._parse_types( +393 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers +394 ) +395 +396 if this and not schema: +397 return this.transform( +398 lambda node: ( +399 node.replace(exp.DataType.build("text")) +400 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") +401 else node +402 ), +403 copy=False, +404 ) +405 +406 return this +407 +408 def _parse_partition_and_order( +409 self, +410 ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]: +411 return ( +412 ( +413 self._parse_csv(self._parse_conjunction) +414 if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY}) +415 else [] +416 ), +417 super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)), +418 ) +419 +420 class Generator(generator.Generator): +421 LIMIT_FETCH = "LIMIT" +422 TABLESAMPLE_WITH_METHOD = False +423 JOIN_HINTS = False +424 TABLE_HINTS = False +425 QUERY_HINTS = False +426 INDEX_ON = "ON TABLE" +427 EXTRACT_ALLOWS_QUOTES = False +428 NVL2_SUPPORTED = False +429 LAST_DAY_SUPPORTS_DATE_PART = False +430 JSON_PATH_SINGLE_QUOTE_ESCAPE = True +431 +432 EXPRESSIONS_WITHOUT_NESTED_CTES = { +433 exp.Insert, +434 exp.Select, +435 exp.Subquery, +436 exp.Union, +437 } +438 +439 SUPPORTED_JSON_PATH_PARTS = { +440 exp.JSONPathKey, +441 exp.JSONPathRoot, +442 exp.JSONPathSubscript, +443 exp.JSONPathWildcard, +444 } +445 +446 TYPE_MAPPING = { +447 **generator.Generator.TYPE_MAPPING, +448 exp.DataType.Type.BIT: "BOOLEAN", +449 exp.DataType.Type.DATETIME: "TIMESTAMP", +450 exp.DataType.Type.TEXT: "STRING", +451 exp.DataType.Type.TIME: "TIMESTAMP", +452 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +453 exp.DataType.Type.VARBINARY: "BINARY", +454 } +455 +456 TRANSFORMS = { +457 **generator.Generator.TRANSFORMS, +458 exp.Group: transforms.preprocess([transforms.unalias_group]), 
+459 exp.Select: transforms.preprocess( +460 [ +461 transforms.eliminate_qualify, +462 transforms.eliminate_distinct_on, +463 transforms.unnest_to_explode, +464 ] +465 ), +466 exp.Property: _property_sql, +467 exp.AnyValue: rename_func("FIRST"), +468 exp.ApproxDistinct: approx_count_distinct_sql, +469 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), +470 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), +471 exp.ArrayConcat: rename_func("CONCAT"), +472 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), +473 exp.ArraySize: rename_func("SIZE"), +474 exp.ArraySort: _array_sort_sql, +475 exp.With: no_recursive_cte_sql, +476 exp.DateAdd: _add_date_sql, +477 exp.DateDiff: _date_diff_sql, +478 exp.DateStrToDate: datestrtodate_sql, +479 exp.DateSub: _add_date_sql, +480 exp.DateToDi: lambda self, +481 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", +482 exp.DiToDate: lambda self, +483 e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", +484 exp.FileFormatProperty: lambda self, +485 e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", +486 exp.FromBase64: rename_func("UNBASE64"), +487 exp.If: if_sql(), +488 exp.ILike: no_ilike_sql, +489 exp.IsNan: rename_func("ISNAN"), +490 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), +491 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), +492 exp.JSONFormat: _json_format_sql, +493 exp.Left: left_to_substring_sql, +494 exp.Map: var_map_sql, +495 exp.Max: max_or_greatest, +496 exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)), +497 exp.Min: min_or_least, +498 exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression), +499 exp.NotNullColumnConstraint: lambda self, e: ( +500 "" if e.args.get("allow_null") else "NOT NULL" +501 ), +502 exp.VarMap: var_map_sql, +503 exp.Create: preprocess( +504 [ +505 remove_unique_constraints, +506 ctas_with_tmp_tables_to_create_tmp_view, +507 move_schema_columns_to_partitioned_by, +508 ] +509 ), +510 exp.Quantile: rename_func("PERCENTILE"), +511 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), +512 exp.RegexpExtract: regexp_extract_sql, +513 exp.RegexpReplace: regexp_replace_sql, +514 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), +515 exp.RegexpSplit: rename_func("SPLIT"), +516 exp.Right: right_to_substring_sql, +517 exp.SafeDivide: no_safe_divide_sql, +518 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), +519 exp.ArrayUniqueAgg: rename_func("COLLECT_SET"), +520 exp.Split: lambda self, +521 e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", +522 exp.StrPosition: strposition_to_locate_sql, +523 exp.StrToDate: _str_to_date_sql, +524 exp.StrToTime: _str_to_time_sql, +525 exp.StrToUnix: _str_to_unix_sql, +526 exp.StructExtract: struct_extract_sql, +527 exp.TimeStrToDate: rename_func("TO_DATE"), +528 exp.TimeStrToTime: timestrtotime_sql, +529 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +530 exp.TimeToStr: _time_to_str, +531 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), +532 exp.ToBase64: rename_func("BASE64"), +533 exp.TsOrDiToDi: lambda self, +534 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", +535 exp.TsOrDsAdd: _add_date_sql, +536 exp.TsOrDsDiff: _date_diff_sql, +537 exp.TsOrDsToDate: _to_date_sql, +538 exp.TryCast: no_trycast_sql, +539 exp.UnixToStr: lambda self, e: self.func( +540 "FROM_UNIXTIME", e.this, time_format("hive")(self, e) +541 
), +542 exp.UnixToTime: rename_func("FROM_UNIXTIME"), +543 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), +544 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", +545 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), +546 exp.NumberToStr: rename_func("FORMAT_NUMBER"), +547 exp.National: lambda self, e: self.national_sql(e, prefix=""), +548 exp.ClusteredColumnConstraint: lambda self, +549 e: f"({self.expressions(e, 'this', indent=False)})", +550 exp.NonClusteredColumnConstraint: lambda self, +551 e: f"({self.expressions(e, 'this', indent=False)})", +552 exp.NotForReplicationColumnConstraint: lambda self, e: "", +553 exp.OnProperty: lambda self, e: "", +554 exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY", +555 } +556 +557 PROPERTIES_LOCATION = { +558 **generator.Generator.PROPERTIES_LOCATION, +559 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, +560 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +561 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +562 exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED, +563 } +564 +565 def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str: +566 if isinstance(expression.this, exp.JSONPathWildcard): +567 self.unsupported("Unsupported wildcard in JSONPathKey expression") +568 return "" +569 +570 return super()._jsonpathkey_sql(expression) +571 +572 def parameter_sql(self, expression: exp.Parameter) -> str: +573 this = self.sql(expression, "this") +574 expression_sql = self.sql(expression, "expression") +575 +576 parent = expression.parent +577 this = f"{this}:{expression_sql}" if expression_sql else this +578 +579 if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem): +580 # We need to produce SET key = value instead of SET ${key} = value +581 return this +582 +583 return f"${{{this}}}" +584 +585 def schema_sql(self, expression: exp.Schema) -> str: +586 for ordered in expression.find_all(exp.Ordered): +587 if ordered.args.get("desc") is False: +588 ordered.set("desc", None) +589 +590 return super().schema_sql(expression) +591 +592 def constraint_sql(self, expression: exp.Constraint) -> str: +593 for prop in list(expression.find_all(exp.Properties)): +594 prop.pop() +595 +596 this = self.sql(expression, "this") +597 expressions = self.expressions(expression, sep=" ", flat=True) +598 return f"CONSTRAINT {this} {expressions}" +599 +600 def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str: +601 serde_props = self.sql(expression, "serde_properties") +602 serde_props = f" {serde_props}" if serde_props else "" +603 return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}" +604 +605 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: +606 return self.func( +607 "COLLECT_LIST", +608 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, +609 ) +610 +611 def with_properties(self, properties: exp.Properties) -> str: +612 return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) +613 +614 def datatype_sql(self, expression: exp.DataType) -> str: +615 if ( +616 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) +617 and not expression.expressions +618 ): +619 expression = exp.DataType.build("text") +620 elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions: +621 expression.set("this", exp.DataType.Type.VARCHAR) +622 elif expression.this in exp.DataType.TEMPORAL_TYPES: +623 
expression = exp.DataType.build(expression.this) +624 elif expression.is_type("float"): +625 size_expression = expression.find(exp.DataTypeParam) +626 if size_expression: +627 size = int(size_expression.name) +628 expression = ( +629 exp.DataType.build("float") if size <= 32 else exp.DataType.build("double") +630 ) +631 +632 return super().datatype_sql(expression) +633 +634 def version_sql(self, expression: exp.Version) -> str: +635 sql = super().version_sql(expression) +636 return sql.replace("FOR ", "", 1)
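Two behavioural points from the Hive parser diff above are worth illustrating. First, per the _parse_types docstring, CHAR(length)/VARCHAR(length) casts are widened to STRING outside schema definitions, so the length is dropped when transpiling. Second, the new VALUES_FOLLOWED_BY_PAREN = False reflects that Hive/Spark accept VALUES rows without parentheses. A hedged sketch follows; the exact outputs are assumptions inferred from the code shown above, not asserted by the patch:

import sqlglot
from sqlglot import exp

# Per the _parse_types docstring, CHAR(n)/VARCHAR(n) casts are widened outside of
# schema contexts, so the length is dropped and Hive generates STRING.
print(sqlglot.transpile("SELECT CAST(1234 AS VARCHAR(2))", read="hive", write="hive")[0])
# expected (assumption): SELECT CAST(1234 AS STRING)

# With VALUES_FOLLOWED_BY_PAREN = False, Hive/Spark-style unparenthesised VALUES rows
# still parse as a row constructor rather than as an identifier.
values_expr = sqlglot.parse_one("SELECT * FROM VALUES 1, 2 AS t(a)", read="hive")
print(values_expr.find(exp.Values) is not None)  # expected: True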
    @@ -1052,373 +1056,374 @@ 267 class Parser(parser.Parser): 268 LOG_DEFAULTS_TO_LN = True 269 STRICT_CAST = False -270 -271 FUNCTIONS = { -272 **parser.Parser.FUNCTIONS, -273 "BASE64": exp.ToBase64.from_arg_list, -274 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, -275 "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list, -276 "DATE_ADD": lambda args: exp.TsOrDsAdd( -277 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") -278 ), -279 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( -280 [ -281 exp.TimeStrToTime(this=seq_get(args, 0)), -282 seq_get(args, 1), -283 ] -284 ), -285 "DATE_SUB": lambda args: exp.TsOrDsAdd( -286 this=seq_get(args, 0), -287 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), -288 unit=exp.Literal.string("DAY"), -289 ), -290 "DATEDIFF": lambda args: exp.DateDiff( -291 this=exp.TsOrDsToDate(this=seq_get(args, 0)), -292 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), -293 ), -294 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), -295 "FIRST": _parse_ignore_nulls(exp.First), -296 "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue), -297 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), -298 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, -299 "LAST": _parse_ignore_nulls(exp.Last), -300 "LAST_VALUE": _parse_ignore_nulls(exp.LastValue), -301 "LOCATE": locate_to_strposition, -302 "MAP": parse_var_map, -303 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), -304 "PERCENTILE": exp.Quantile.from_arg_list, -305 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, -306 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -307 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) -308 ), -309 "SIZE": exp.ArraySize.from_arg_list, -310 "SPLIT": exp.RegexpSplit.from_arg_list, -311 "STR_TO_MAP": lambda args: exp.StrToMap( -312 this=seq_get(args, 0), -313 pair_delim=seq_get(args, 1) or exp.Literal.string(","), -314 key_value_delim=seq_get(args, 2) or exp.Literal.string(":"), -315 ), -316 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), -317 "TO_JSON": exp.JSONFormat.from_arg_list, -318 "UNBASE64": exp.FromBase64.from_arg_list, -319 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), -320 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), -321 } -322 -323 NO_PAREN_FUNCTION_PARSERS = { -324 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, -325 "TRANSFORM": lambda self: self._parse_transform(), -326 } -327 -328 PROPERTY_PARSERS = { -329 **parser.Parser.PROPERTY_PARSERS, -330 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( -331 expressions=self._parse_wrapped_csv(self._parse_property) -332 ), -333 } -334 -335 def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]: -336 if not self._match(TokenType.L_PAREN, advance=False): -337 self._retreat(self._index - 1) -338 return None -339 -340 args = self._parse_wrapped_csv(self._parse_lambda) -341 row_format_before = self._parse_row_format(match_row=True) -342 -343 record_writer = None -344 if self._match_text_seq("RECORDWRITER"): -345 record_writer = self._parse_string() -346 -347 if not self._match(TokenType.USING): -348 return exp.Transform.from_arg_list(args) -349 -350 command_script = self._parse_string() -351 -352 self._match(TokenType.ALIAS) -353 schema = self._parse_schema() -354 -355 row_format_after = self._parse_row_format(match_row=True) -356 record_reader = None -357 if 
self._match_text_seq("RECORDREADER"): -358 record_reader = self._parse_string() -359 -360 return self.expression( -361 exp.QueryTransform, -362 expressions=args, -363 command_script=command_script, -364 schema=schema, -365 row_format_before=row_format_before, -366 record_writer=record_writer, -367 row_format_after=row_format_after, -368 record_reader=record_reader, -369 ) -370 -371 def _parse_types( -372 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True -373 ) -> t.Optional[exp.Expression]: -374 """ -375 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to -376 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: -377 -378 spark-sql (default)> select cast(1234 as varchar(2)); -379 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support -380 char/varchar type and simply treats them as string type. Please use string type -381 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString -382 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier -383 -384 1234 -385 Time taken: 4.265 seconds, Fetched 1 row(s) -386 -387 This shows that Spark doesn't truncate the value into '12', which is inconsistent with -388 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. -389 -390 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html -391 """ -392 this = super()._parse_types( -393 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers -394 ) -395 -396 if this and not schema: -397 return this.transform( -398 lambda node: ( -399 node.replace(exp.DataType.build("text")) -400 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") -401 else node -402 ), -403 copy=False, -404 ) -405 -406 return this -407 -408 def _parse_partition_and_order( -409 self, -410 ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]: -411 return ( -412 ( -413 self._parse_csv(self._parse_conjunction) -414 if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY}) -415 else [] -416 ), -417 super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)), -418 ) -419 -420 class Generator(generator.Generator): -421 LIMIT_FETCH = "LIMIT" -422 TABLESAMPLE_WITH_METHOD = False -423 JOIN_HINTS = False -424 TABLE_HINTS = False -425 QUERY_HINTS = False -426 INDEX_ON = "ON TABLE" -427 EXTRACT_ALLOWS_QUOTES = False -428 NVL2_SUPPORTED = False -429 LAST_DAY_SUPPORTS_DATE_PART = False -430 JSON_PATH_SINGLE_QUOTE_ESCAPE = True -431 -432 EXPRESSIONS_WITHOUT_NESTED_CTES = { -433 exp.Insert, -434 exp.Select, -435 exp.Subquery, -436 exp.Union, -437 } -438 -439 SUPPORTED_JSON_PATH_PARTS = { -440 exp.JSONPathKey, -441 exp.JSONPathRoot, -442 exp.JSONPathSubscript, -443 exp.JSONPathWildcard, -444 } -445 -446 TYPE_MAPPING = { -447 **generator.Generator.TYPE_MAPPING, -448 exp.DataType.Type.BIT: "BOOLEAN", -449 exp.DataType.Type.DATETIME: "TIMESTAMP", -450 exp.DataType.Type.TEXT: "STRING", -451 exp.DataType.Type.TIME: "TIMESTAMP", -452 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -453 exp.DataType.Type.VARBINARY: "BINARY", -454 } -455 -456 TRANSFORMS = { -457 **generator.Generator.TRANSFORMS, -458 exp.Group: transforms.preprocess([transforms.unalias_group]), -459 exp.Select: transforms.preprocess( -460 [ -461 transforms.eliminate_qualify, -462 transforms.eliminate_distinct_on, -463 transforms.unnest_to_explode, -464 ] -465 ), -466 exp.Property: 
_property_sql, -467 exp.AnyValue: rename_func("FIRST"), -468 exp.ApproxDistinct: approx_count_distinct_sql, -469 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), -470 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), -471 exp.ArrayConcat: rename_func("CONCAT"), -472 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), -473 exp.ArraySize: rename_func("SIZE"), -474 exp.ArraySort: _array_sort_sql, -475 exp.With: no_recursive_cte_sql, -476 exp.DateAdd: _add_date_sql, -477 exp.DateDiff: _date_diff_sql, -478 exp.DateStrToDate: datestrtodate_sql, -479 exp.DateSub: _add_date_sql, -480 exp.DateToDi: lambda self, -481 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", -482 exp.DiToDate: lambda self, -483 e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", -484 exp.FileFormatProperty: lambda self, -485 e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", -486 exp.FromBase64: rename_func("UNBASE64"), -487 exp.If: if_sql(), -488 exp.ILike: no_ilike_sql, -489 exp.IsNan: rename_func("ISNAN"), -490 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), -491 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), -492 exp.JSONFormat: _json_format_sql, -493 exp.Left: left_to_substring_sql, -494 exp.Map: var_map_sql, -495 exp.Max: max_or_greatest, -496 exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)), -497 exp.Min: min_or_least, -498 exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression), -499 exp.NotNullColumnConstraint: lambda self, e: ( -500 "" if e.args.get("allow_null") else "NOT NULL" -501 ), -502 exp.VarMap: var_map_sql, -503 exp.Create: preprocess( -504 [ -505 remove_unique_constraints, -506 ctas_with_tmp_tables_to_create_tmp_view, -507 move_schema_columns_to_partitioned_by, -508 ] -509 ), -510 exp.Quantile: rename_func("PERCENTILE"), -511 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), -512 exp.RegexpExtract: regexp_extract_sql, -513 exp.RegexpReplace: regexp_replace_sql, -514 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), -515 exp.RegexpSplit: rename_func("SPLIT"), -516 exp.Right: right_to_substring_sql, -517 exp.SafeDivide: no_safe_divide_sql, -518 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), -519 exp.ArrayUniqueAgg: rename_func("COLLECT_SET"), -520 exp.Split: lambda self, -521 e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", -522 exp.StrPosition: strposition_to_locate_sql, -523 exp.StrToDate: _str_to_date_sql, -524 exp.StrToTime: _str_to_time_sql, -525 exp.StrToUnix: _str_to_unix_sql, -526 exp.StructExtract: struct_extract_sql, -527 exp.TimeStrToDate: rename_func("TO_DATE"), -528 exp.TimeStrToTime: timestrtotime_sql, -529 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -530 exp.TimeToStr: _time_to_str, -531 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), -532 exp.ToBase64: rename_func("BASE64"), -533 exp.TsOrDiToDi: lambda self, -534 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", -535 exp.TsOrDsAdd: _add_date_sql, -536 exp.TsOrDsDiff: _date_diff_sql, -537 exp.TsOrDsToDate: _to_date_sql, -538 exp.TryCast: no_trycast_sql, -539 exp.UnixToStr: lambda self, e: self.func( -540 "FROM_UNIXTIME", e.this, time_format("hive")(self, e) -541 ), -542 exp.UnixToTime: rename_func("FROM_UNIXTIME"), -543 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), -544 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 
'this')}", -545 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), -546 exp.NumberToStr: rename_func("FORMAT_NUMBER"), -547 exp.National: lambda self, e: self.national_sql(e, prefix=""), -548 exp.ClusteredColumnConstraint: lambda self, -549 e: f"({self.expressions(e, 'this', indent=False)})", -550 exp.NonClusteredColumnConstraint: lambda self, -551 e: f"({self.expressions(e, 'this', indent=False)})", -552 exp.NotForReplicationColumnConstraint: lambda self, e: "", -553 exp.OnProperty: lambda self, e: "", -554 exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY", -555 } -556 -557 PROPERTIES_LOCATION = { -558 **generator.Generator.PROPERTIES_LOCATION, -559 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, -560 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, -561 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -562 exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED, -563 } -564 -565 def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str: -566 if isinstance(expression.this, exp.JSONPathWildcard): -567 self.unsupported("Unsupported wildcard in JSONPathKey expression") -568 return "" -569 -570 return super()._jsonpathkey_sql(expression) -571 -572 def parameter_sql(self, expression: exp.Parameter) -> str: -573 this = self.sql(expression, "this") -574 expression_sql = self.sql(expression, "expression") -575 -576 parent = expression.parent -577 this = f"{this}:{expression_sql}" if expression_sql else this -578 -579 if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem): -580 # We need to produce SET key = value instead of SET ${key} = value -581 return this -582 -583 return f"${{{this}}}" -584 -585 def schema_sql(self, expression: exp.Schema) -> str: -586 for ordered in expression.find_all(exp.Ordered): -587 if ordered.args.get("desc") is False: -588 ordered.set("desc", None) -589 -590 return super().schema_sql(expression) -591 -592 def constraint_sql(self, expression: exp.Constraint) -> str: -593 for prop in list(expression.find_all(exp.Properties)): -594 prop.pop() -595 -596 this = self.sql(expression, "this") -597 expressions = self.expressions(expression, sep=" ", flat=True) -598 return f"CONSTRAINT {this} {expressions}" -599 -600 def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str: -601 serde_props = self.sql(expression, "serde_properties") -602 serde_props = f" {serde_props}" if serde_props else "" -603 return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}" -604 -605 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: -606 return self.func( -607 "COLLECT_LIST", -608 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, -609 ) -610 -611 def with_properties(self, properties: exp.Properties) -> str: -612 return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) -613 -614 def datatype_sql(self, expression: exp.DataType) -> str: -615 if ( -616 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) -617 and not expression.expressions -618 ): -619 expression = exp.DataType.build("text") -620 elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions: -621 expression.set("this", exp.DataType.Type.VARCHAR) -622 elif expression.this in exp.DataType.TEMPORAL_TYPES: -623 expression = exp.DataType.build(expression.this) -624 elif expression.is_type("float"): -625 size_expression = expression.find(exp.DataTypeParam) -626 if size_expression: -627 size = 
int(size_expression.name) -628 expression = ( -629 exp.DataType.build("float") if size <= 32 else exp.DataType.build("double") -630 ) -631 -632 return super().datatype_sql(expression) -633 -634 def version_sql(self, expression: exp.Version) -> str: -635 sql = super().version_sql(expression) -636 return sql.replace("FOR ", "", 1) +270 VALUES_FOLLOWED_BY_PAREN = False +271 +272 FUNCTIONS = { +273 **parser.Parser.FUNCTIONS, +274 "BASE64": exp.ToBase64.from_arg_list, +275 "COLLECT_LIST": exp.ArrayAgg.from_arg_list, +276 "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list, +277 "DATE_ADD": lambda args: exp.TsOrDsAdd( +278 this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY") +279 ), +280 "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")( +281 [ +282 exp.TimeStrToTime(this=seq_get(args, 0)), +283 seq_get(args, 1), +284 ] +285 ), +286 "DATE_SUB": lambda args: exp.TsOrDsAdd( +287 this=seq_get(args, 0), +288 expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)), +289 unit=exp.Literal.string("DAY"), +290 ), +291 "DATEDIFF": lambda args: exp.DateDiff( +292 this=exp.TsOrDsToDate(this=seq_get(args, 0)), +293 expression=exp.TsOrDsToDate(this=seq_get(args, 1)), +294 ), +295 "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))), +296 "FIRST": _parse_ignore_nulls(exp.First), +297 "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue), +298 "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True), +299 "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list, +300 "LAST": _parse_ignore_nulls(exp.Last), +301 "LAST_VALUE": _parse_ignore_nulls(exp.LastValue), +302 "LOCATE": locate_to_strposition, +303 "MAP": parse_var_map, +304 "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)), +305 "PERCENTILE": exp.Quantile.from_arg_list, +306 "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list, +307 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +308 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) +309 ), +310 "SIZE": exp.ArraySize.from_arg_list, +311 "SPLIT": exp.RegexpSplit.from_arg_list, +312 "STR_TO_MAP": lambda args: exp.StrToMap( +313 this=seq_get(args, 0), +314 pair_delim=seq_get(args, 1) or exp.Literal.string(","), +315 key_value_delim=seq_get(args, 2) or exp.Literal.string(":"), +316 ), +317 "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"), +318 "TO_JSON": exp.JSONFormat.from_arg_list, +319 "UNBASE64": exp.FromBase64.from_arg_list, +320 "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True), +321 "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)), +322 } +323 +324 NO_PAREN_FUNCTION_PARSERS = { +325 **parser.Parser.NO_PAREN_FUNCTION_PARSERS, +326 "TRANSFORM": lambda self: self._parse_transform(), +327 } +328 +329 PROPERTY_PARSERS = { +330 **parser.Parser.PROPERTY_PARSERS, +331 "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties( +332 expressions=self._parse_wrapped_csv(self._parse_property) +333 ), +334 } +335 +336 def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]: +337 if not self._match(TokenType.L_PAREN, advance=False): +338 self._retreat(self._index - 1) +339 return None +340 +341 args = self._parse_wrapped_csv(self._parse_lambda) +342 row_format_before = self._parse_row_format(match_row=True) +343 +344 record_writer = None +345 if self._match_text_seq("RECORDWRITER"): +346 record_writer = self._parse_string() +347 +348 if not self._match(TokenType.USING): +349 return 
exp.Transform.from_arg_list(args) +350 +351 command_script = self._parse_string() +352 +353 self._match(TokenType.ALIAS) +354 schema = self._parse_schema() +355 +356 row_format_after = self._parse_row_format(match_row=True) +357 record_reader = None +358 if self._match_text_seq("RECORDREADER"): +359 record_reader = self._parse_string() +360 +361 return self.expression( +362 exp.QueryTransform, +363 expressions=args, +364 command_script=command_script, +365 schema=schema, +366 row_format_before=row_format_before, +367 record_writer=record_writer, +368 row_format_after=row_format_after, +369 record_reader=record_reader, +370 ) +371 +372 def _parse_types( +373 self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True +374 ) -> t.Optional[exp.Expression]: +375 """ +376 Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to +377 STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0: +378 +379 spark-sql (default)> select cast(1234 as varchar(2)); +380 23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support +381 char/varchar type and simply treats them as string type. Please use string type +382 directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString +383 to true, so that Spark treat them as string type as same as Spark 3.0 and earlier +384 +385 1234 +386 Time taken: 4.265 seconds, Fetched 1 row(s) +387 +388 This shows that Spark doesn't truncate the value into '12', which is inconsistent with +389 what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly. +390 +391 Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html +392 """ +393 this = super()._parse_types( +394 check_func=check_func, schema=schema, allow_identifiers=allow_identifiers +395 ) +396 +397 if this and not schema: +398 return this.transform( +399 lambda node: ( +400 node.replace(exp.DataType.build("text")) +401 if isinstance(node, exp.DataType) and node.is_type("char", "varchar") +402 else node +403 ), +404 copy=False, +405 ) +406 +407 return this +408 +409 def _parse_partition_and_order( +410 self, +411 ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]: +412 return ( +413 ( +414 self._parse_csv(self._parse_conjunction) +415 if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY}) +416 else [] +417 ), +418 super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)), +419 ) +420 +421 class Generator(generator.Generator): +422 LIMIT_FETCH = "LIMIT" +423 TABLESAMPLE_WITH_METHOD = False +424 JOIN_HINTS = False +425 TABLE_HINTS = False +426 QUERY_HINTS = False +427 INDEX_ON = "ON TABLE" +428 EXTRACT_ALLOWS_QUOTES = False +429 NVL2_SUPPORTED = False +430 LAST_DAY_SUPPORTS_DATE_PART = False +431 JSON_PATH_SINGLE_QUOTE_ESCAPE = True +432 +433 EXPRESSIONS_WITHOUT_NESTED_CTES = { +434 exp.Insert, +435 exp.Select, +436 exp.Subquery, +437 exp.Union, +438 } +439 +440 SUPPORTED_JSON_PATH_PARTS = { +441 exp.JSONPathKey, +442 exp.JSONPathRoot, +443 exp.JSONPathSubscript, +444 exp.JSONPathWildcard, +445 } +446 +447 TYPE_MAPPING = { +448 **generator.Generator.TYPE_MAPPING, +449 exp.DataType.Type.BIT: "BOOLEAN", +450 exp.DataType.Type.DATETIME: "TIMESTAMP", +451 exp.DataType.Type.TEXT: "STRING", +452 exp.DataType.Type.TIME: "TIMESTAMP", +453 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +454 exp.DataType.Type.VARBINARY: "BINARY", +455 } +456 +457 TRANSFORMS = { +458 **generator.Generator.TRANSFORMS, +459 
exp.Group: transforms.preprocess([transforms.unalias_group]), +460 exp.Select: transforms.preprocess( +461 [ +462 transforms.eliminate_qualify, +463 transforms.eliminate_distinct_on, +464 transforms.unnest_to_explode, +465 ] +466 ), +467 exp.Property: _property_sql, +468 exp.AnyValue: rename_func("FIRST"), +469 exp.ApproxDistinct: approx_count_distinct_sql, +470 exp.ArgMax: arg_max_or_min_no_count("MAX_BY"), +471 exp.ArgMin: arg_max_or_min_no_count("MIN_BY"), +472 exp.ArrayConcat: rename_func("CONCAT"), +473 exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this), +474 exp.ArraySize: rename_func("SIZE"), +475 exp.ArraySort: _array_sort_sql, +476 exp.With: no_recursive_cte_sql, +477 exp.DateAdd: _add_date_sql, +478 exp.DateDiff: _date_diff_sql, +479 exp.DateStrToDate: datestrtodate_sql, +480 exp.DateSub: _add_date_sql, +481 exp.DateToDi: lambda self, +482 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)", +483 exp.DiToDate: lambda self, +484 e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})", +485 exp.FileFormatProperty: lambda self, +486 e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}", +487 exp.FromBase64: rename_func("UNBASE64"), +488 exp.If: if_sql(), +489 exp.ILike: no_ilike_sql, +490 exp.IsNan: rename_func("ISNAN"), +491 exp.JSONExtract: rename_func("GET_JSON_OBJECT"), +492 exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"), +493 exp.JSONFormat: _json_format_sql, +494 exp.Left: left_to_substring_sql, +495 exp.Map: var_map_sql, +496 exp.Max: max_or_greatest, +497 exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)), +498 exp.Min: min_or_least, +499 exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression), +500 exp.NotNullColumnConstraint: lambda self, e: ( +501 "" if e.args.get("allow_null") else "NOT NULL" +502 ), +503 exp.VarMap: var_map_sql, +504 exp.Create: preprocess( +505 [ +506 remove_unique_constraints, +507 ctas_with_tmp_tables_to_create_tmp_view, +508 move_schema_columns_to_partitioned_by, +509 ] +510 ), +511 exp.Quantile: rename_func("PERCENTILE"), +512 exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"), +513 exp.RegexpExtract: regexp_extract_sql, +514 exp.RegexpReplace: regexp_replace_sql, +515 exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"), +516 exp.RegexpSplit: rename_func("SPLIT"), +517 exp.Right: right_to_substring_sql, +518 exp.SafeDivide: no_safe_divide_sql, +519 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), +520 exp.ArrayUniqueAgg: rename_func("COLLECT_SET"), +521 exp.Split: lambda self, +522 e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))", +523 exp.StrPosition: strposition_to_locate_sql, +524 exp.StrToDate: _str_to_date_sql, +525 exp.StrToTime: _str_to_time_sql, +526 exp.StrToUnix: _str_to_unix_sql, +527 exp.StructExtract: struct_extract_sql, +528 exp.TimeStrToDate: rename_func("TO_DATE"), +529 exp.TimeStrToTime: timestrtotime_sql, +530 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +531 exp.TimeToStr: _time_to_str, +532 exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"), +533 exp.ToBase64: rename_func("BASE64"), +534 exp.TsOrDiToDi: lambda self, +535 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)", +536 exp.TsOrDsAdd: _add_date_sql, +537 exp.TsOrDsDiff: _date_diff_sql, +538 exp.TsOrDsToDate: _to_date_sql, +539 exp.TryCast: no_trycast_sql, +540 exp.UnixToStr: lambda self, e: self.func( 
+541 "FROM_UNIXTIME", e.this, time_format("hive")(self, e) +542 ), +543 exp.UnixToTime: rename_func("FROM_UNIXTIME"), +544 exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"), +545 exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}", +546 exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"), +547 exp.NumberToStr: rename_func("FORMAT_NUMBER"), +548 exp.National: lambda self, e: self.national_sql(e, prefix=""), +549 exp.ClusteredColumnConstraint: lambda self, +550 e: f"({self.expressions(e, 'this', indent=False)})", +551 exp.NonClusteredColumnConstraint: lambda self, +552 e: f"({self.expressions(e, 'this', indent=False)})", +553 exp.NotForReplicationColumnConstraint: lambda self, e: "", +554 exp.OnProperty: lambda self, e: "", +555 exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY", +556 } +557 +558 PROPERTIES_LOCATION = { +559 **generator.Generator.PROPERTIES_LOCATION, +560 exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA, +561 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, +562 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +563 exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED, +564 } +565 +566 def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str: +567 if isinstance(expression.this, exp.JSONPathWildcard): +568 self.unsupported("Unsupported wildcard in JSONPathKey expression") +569 return "" +570 +571 return super()._jsonpathkey_sql(expression) +572 +573 def parameter_sql(self, expression: exp.Parameter) -> str: +574 this = self.sql(expression, "this") +575 expression_sql = self.sql(expression, "expression") +576 +577 parent = expression.parent +578 this = f"{this}:{expression_sql}" if expression_sql else this +579 +580 if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem): +581 # We need to produce SET key = value instead of SET ${key} = value +582 return this +583 +584 return f"${{{this}}}" +585 +586 def schema_sql(self, expression: exp.Schema) -> str: +587 for ordered in expression.find_all(exp.Ordered): +588 if ordered.args.get("desc") is False: +589 ordered.set("desc", None) +590 +591 return super().schema_sql(expression) +592 +593 def constraint_sql(self, expression: exp.Constraint) -> str: +594 for prop in list(expression.find_all(exp.Properties)): +595 prop.pop() +596 +597 this = self.sql(expression, "this") +598 expressions = self.expressions(expression, sep=" ", flat=True) +599 return f"CONSTRAINT {this} {expressions}" +600 +601 def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str: +602 serde_props = self.sql(expression, "serde_properties") +603 serde_props = f" {serde_props}" if serde_props else "" +604 return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}" +605 +606 def arrayagg_sql(self, expression: exp.ArrayAgg) -> str: +607 return self.func( +608 "COLLECT_LIST", +609 expression.this.this if isinstance(expression.this, exp.Order) else expression.this, +610 ) +611 +612 def with_properties(self, properties: exp.Properties) -> str: +613 return self.properties(properties, prefix=self.seg("TBLPROPERTIES")) +614 +615 def datatype_sql(self, expression: exp.DataType) -> str: +616 if ( +617 expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR) +618 and not expression.expressions +619 ): +620 expression = exp.DataType.build("text") +621 elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions: +622 expression.set("this", exp.DataType.Type.VARCHAR) +623 
elif expression.this in exp.DataType.TEMPORAL_TYPES: +624 expression = exp.DataType.build(expression.this) +625 elif expression.is_type("float"): +626 size_expression = expression.find(exp.DataTypeParam) +627 if size_expression: +628 size = int(size_expression.name) +629 expression = ( +630 exp.DataType.build("float") if size <= 32 else exp.DataType.build("double") +631 ) +632 +633 return super().datatype_sql(expression) +634 +635 def version_sql(self, expression: exp.Version) -> str: +636 sql = super().version_sql(expression) +637 return sql.replace("FOR ", "", 1)
    @@ -1999,155 +2004,156 @@
    267    class Parser(parser.Parser):
     268        LOG_DEFAULTS_TO_LN = True
     269        STRICT_CAST = False
    -270
    -271        FUNCTIONS = {
    -272            **parser.Parser.FUNCTIONS,
    -273            "BASE64": exp.ToBase64.from_arg_list,
    -274            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
    -275            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
    -276            "DATE_ADD": lambda args: exp.TsOrDsAdd(
    -277                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
    -278            ),
    -279            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
    -280                [
    -281                    exp.TimeStrToTime(this=seq_get(args, 0)),
    -282                    seq_get(args, 1),
    -283                ]
    -284            ),
    -285            "DATE_SUB": lambda args: exp.TsOrDsAdd(
    -286                this=seq_get(args, 0),
    -287                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
    -288                unit=exp.Literal.string("DAY"),
    -289            ),
    -290            "DATEDIFF": lambda args: exp.DateDiff(
    -291                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
    -292                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    -293            ),
    -294            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
    -295            "FIRST": _parse_ignore_nulls(exp.First),
    -296            "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue),
    -297            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
    -298            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
    -299            "LAST": _parse_ignore_nulls(exp.Last),
    -300            "LAST_VALUE": _parse_ignore_nulls(exp.LastValue),
    -301            "LOCATE": locate_to_strposition,
    -302            "MAP": parse_var_map,
    -303            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
    -304            "PERCENTILE": exp.Quantile.from_arg_list,
    -305            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
    -306            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    -307                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
    -308            ),
    -309            "SIZE": exp.ArraySize.from_arg_list,
    -310            "SPLIT": exp.RegexpSplit.from_arg_list,
    -311            "STR_TO_MAP": lambda args: exp.StrToMap(
    -312                this=seq_get(args, 0),
    -313                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
    -314                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
    -315            ),
    -316            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
    -317            "TO_JSON": exp.JSONFormat.from_arg_list,
    -318            "UNBASE64": exp.FromBase64.from_arg_list,
    -319            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
    -320            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
    -321        }
    -322
    -323        NO_PAREN_FUNCTION_PARSERS = {
    -324            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
    -325            "TRANSFORM": lambda self: self._parse_transform(),
    -326        }
    -327
    -328        PROPERTY_PARSERS = {
    -329            **parser.Parser.PROPERTY_PARSERS,
    -330            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
    -331                expressions=self._parse_wrapped_csv(self._parse_property)
    -332            ),
    -333        }
    -334
    -335        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
    -336            if not self._match(TokenType.L_PAREN, advance=False):
    -337                self._retreat(self._index - 1)
    -338                return None
    -339
    -340            args = self._parse_wrapped_csv(self._parse_lambda)
    -341            row_format_before = self._parse_row_format(match_row=True)
    -342
    -343            record_writer = None
    -344            if self._match_text_seq("RECORDWRITER"):
    -345                record_writer = self._parse_string()
    -346
    -347            if not self._match(TokenType.USING):
    -348                return exp.Transform.from_arg_list(args)
    -349
    -350            command_script = self._parse_string()
    -351
    -352            self._match(TokenType.ALIAS)
    -353            schema = self._parse_schema()
    -354
    -355            row_format_after = self._parse_row_format(match_row=True)
    -356            record_reader = None
    -357            if self._match_text_seq("RECORDREADER"):
    -358                record_reader = self._parse_string()
    -359
    -360            return self.expression(
    -361                exp.QueryTransform,
    -362                expressions=args,
    -363                command_script=command_script,
    -364                schema=schema,
    -365                row_format_before=row_format_before,
    -366                record_writer=record_writer,
    -367                row_format_after=row_format_after,
    -368                record_reader=record_reader,
    -369            )
    -370
    -371        def _parse_types(
    -372            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
    -373        ) -> t.Optional[exp.Expression]:
    -374            """
    -375            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
    -376            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
    -377
    -378                spark-sql (default)> select cast(1234 as varchar(2));
    -379                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
    -380                char/varchar type and simply treats them as string type. Please use string type
    -381                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
    -382                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
    -383
    -384                1234
    -385                Time taken: 4.265 seconds, Fetched 1 row(s)
    -386
    -387            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
    -388            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
    -389
    -390            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
    -391            """
    -392            this = super()._parse_types(
    -393                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
    -394            )
    -395
    -396            if this and not schema:
    -397                return this.transform(
    -398                    lambda node: (
    -399                        node.replace(exp.DataType.build("text"))
    -400                        if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
    -401                        else node
    -402                    ),
    -403                    copy=False,
    -404                )
    -405
    -406            return this
    -407
    -408        def _parse_partition_and_order(
    -409            self,
    -410        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
    -411            return (
    -412                (
    -413                    self._parse_csv(self._parse_conjunction)
    -414                    if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
    -415                    else []
    -416                ),
    -417                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
    -418            )
    +270        VALUES_FOLLOWED_BY_PAREN = False
    +271
    +272        FUNCTIONS = {
    +273            **parser.Parser.FUNCTIONS,
    +274            "BASE64": exp.ToBase64.from_arg_list,
    +275            "COLLECT_LIST": exp.ArrayAgg.from_arg_list,
    +276            "COLLECT_SET": exp.ArrayUniqueAgg.from_arg_list,
    +277            "DATE_ADD": lambda args: exp.TsOrDsAdd(
    +278                this=seq_get(args, 0), expression=seq_get(args, 1), unit=exp.Literal.string("DAY")
    +279            ),
    +280            "DATE_FORMAT": lambda args: format_time_lambda(exp.TimeToStr, "hive")(
    +281                [
    +282                    exp.TimeStrToTime(this=seq_get(args, 0)),
    +283                    seq_get(args, 1),
    +284                ]
    +285            ),
    +286            "DATE_SUB": lambda args: exp.TsOrDsAdd(
    +287                this=seq_get(args, 0),
    +288                expression=exp.Mul(this=seq_get(args, 1), expression=exp.Literal.number(-1)),
    +289                unit=exp.Literal.string("DAY"),
    +290            ),
    +291            "DATEDIFF": lambda args: exp.DateDiff(
    +292                this=exp.TsOrDsToDate(this=seq_get(args, 0)),
    +293                expression=exp.TsOrDsToDate(this=seq_get(args, 1)),
    +294            ),
    +295            "DAY": lambda args: exp.Day(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
    +296            "FIRST": _parse_ignore_nulls(exp.First),
    +297            "FIRST_VALUE": _parse_ignore_nulls(exp.FirstValue),
    +298            "FROM_UNIXTIME": format_time_lambda(exp.UnixToStr, "hive", True),
    +299            "GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
    +300            "LAST": _parse_ignore_nulls(exp.Last),
    +301            "LAST_VALUE": _parse_ignore_nulls(exp.LastValue),
    +302            "LOCATE": locate_to_strposition,
    +303            "MAP": parse_var_map,
    +304            "MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
    +305            "PERCENTILE": exp.Quantile.from_arg_list,
    +306            "PERCENTILE_APPROX": exp.ApproxQuantile.from_arg_list,
    +307            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    +308                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
    +309            ),
    +310            "SIZE": exp.ArraySize.from_arg_list,
    +311            "SPLIT": exp.RegexpSplit.from_arg_list,
    +312            "STR_TO_MAP": lambda args: exp.StrToMap(
    +313                this=seq_get(args, 0),
    +314                pair_delim=seq_get(args, 1) or exp.Literal.string(","),
    +315                key_value_delim=seq_get(args, 2) or exp.Literal.string(":"),
    +316            ),
    +317            "TO_DATE": format_time_lambda(exp.TsOrDsToDate, "hive"),
    +318            "TO_JSON": exp.JSONFormat.from_arg_list,
    +319            "UNBASE64": exp.FromBase64.from_arg_list,
    +320            "UNIX_TIMESTAMP": format_time_lambda(exp.StrToUnix, "hive", True),
    +321            "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
    +322        }
    +323
    +324        NO_PAREN_FUNCTION_PARSERS = {
    +325            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
    +326            "TRANSFORM": lambda self: self._parse_transform(),
    +327        }
    +328
    +329        PROPERTY_PARSERS = {
    +330            **parser.Parser.PROPERTY_PARSERS,
    +331            "WITH SERDEPROPERTIES": lambda self: exp.SerdeProperties(
    +332                expressions=self._parse_wrapped_csv(self._parse_property)
    +333            ),
    +334        }
    +335
    +336        def _parse_transform(self) -> t.Optional[exp.Transform | exp.QueryTransform]:
    +337            if not self._match(TokenType.L_PAREN, advance=False):
    +338                self._retreat(self._index - 1)
    +339                return None
    +340
    +341            args = self._parse_wrapped_csv(self._parse_lambda)
    +342            row_format_before = self._parse_row_format(match_row=True)
    +343
    +344            record_writer = None
    +345            if self._match_text_seq("RECORDWRITER"):
    +346                record_writer = self._parse_string()
    +347
    +348            if not self._match(TokenType.USING):
    +349                return exp.Transform.from_arg_list(args)
    +350
    +351            command_script = self._parse_string()
    +352
    +353            self._match(TokenType.ALIAS)
    +354            schema = self._parse_schema()
    +355
    +356            row_format_after = self._parse_row_format(match_row=True)
    +357            record_reader = None
    +358            if self._match_text_seq("RECORDREADER"):
    +359                record_reader = self._parse_string()
    +360
    +361            return self.expression(
    +362                exp.QueryTransform,
    +363                expressions=args,
    +364                command_script=command_script,
    +365                schema=schema,
    +366                row_format_before=row_format_before,
    +367                record_writer=record_writer,
    +368                row_format_after=row_format_after,
    +369                record_reader=record_reader,
    +370            )
    +371
    +372        def _parse_types(
    +373            self, check_func: bool = False, schema: bool = False, allow_identifiers: bool = True
    +374        ) -> t.Optional[exp.Expression]:
    +375            """
    +376            Spark (and most likely Hive) treats casts to CHAR(length) and VARCHAR(length) as casts to
    +377            STRING in all contexts except for schema definitions. For example, this is in Spark v3.4.0:
    +378
    +379                spark-sql (default)> select cast(1234 as varchar(2));
    +380                23/06/06 15:51:18 WARN CharVarcharUtils: The Spark cast operator does not support
    +381                char/varchar type and simply treats them as string type. Please use string type
    +382                directly to avoid confusion. Otherwise, you can set spark.sql.legacy.charVarcharAsString
    +383                to true, so that Spark treat them as string type as same as Spark 3.0 and earlier
    +384
    +385                1234
    +386                Time taken: 4.265 seconds, Fetched 1 row(s)
    +387
    +388            This shows that Spark doesn't truncate the value into '12', which is inconsistent with
    +389            what other dialects (e.g. postgres) do, so we need to drop the length to transpile correctly.
    +390
    +391            Reference: https://spark.apache.org/docs/latest/sql-ref-datatypes.html
    +392            """
    +393            this = super()._parse_types(
    +394                check_func=check_func, schema=schema, allow_identifiers=allow_identifiers
    +395            )
    +396
    +397            if this and not schema:
    +398                return this.transform(
    +399                    lambda node: (
    +400                        node.replace(exp.DataType.build("text"))
    +401                        if isinstance(node, exp.DataType) and node.is_type("char", "varchar")
    +402                        else node
    +403                    ),
    +404                    copy=False,
    +405                )
    +406
    +407            return this
    +408
    +409        def _parse_partition_and_order(
    +410            self,
    +411        ) -> t.Tuple[t.List[exp.Expression], t.Optional[exp.Expression]]:
    +412            return (
    +413                (
    +414                    self._parse_csv(self._parse_conjunction)
    +415                    if self._match_set({TokenType.PARTITION_BY, TokenType.DISTRIBUTE_BY})
    +416                    else []
    +417                ),
    +418                super()._parse_order(skip_order_token=self._match(TokenType.SORT_BY)),
    +419            )
     
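For orientation, a minimal sketch of how the Parser mappings in the hunk above surface through sqlglot's public API (this assumes a sqlglot ~21.x install; the rendered output strings are indicative only and may differ slightly between versions):

import sqlglot

# DATE_ADD / DATE_SUB are parsed into exp.TsOrDsAdd with an explicit DAY unit,
# so they can be re-emitted by dialects that spell day arithmetic differently.
print(sqlglot.transpile("SELECT DATE_ADD('2023-01-01', 7)", read="hive", write="duckdb")[0])

# Per _parse_types above, CAST(... AS VARCHAR(n)) outside a schema definition is
# widened to a plain string type, so the length is dropped on the way back out.
print(sqlglot.transpile("SELECT CAST(1234 AS VARCHAR(2))", read="hive", write="hive")[0])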
@@ -2191,6 +2197,18 @@ Default: 3
+
+VALUES_FOLLOWED_BY_PAREN = False
+
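The newly documented VALUES_FOLLOWED_BY_PAREN = False flag is not explained by the diff itself; the flag name suggests the Hive parser no longer assumes that a VALUES token introduces a row constructor, so a relation that happens to be named values can still be parsed. A hedged illustration (the table name is hypothetical, and the behaviour described is an assumption based on the flag name):

import sqlglot

# Assumption: "values" here should parse as an ordinary table reference rather
# than the start of a VALUES clause, since VALUES_FOLLOWED_BY_PAREN is False.
print(sqlglot.parse_one("SELECT * FROM values", read="hive").sql(dialect="hive"))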
    @@ -2369,223 +2387,223 @@ Default: 3
    -
    420    class Generator(generator.Generator):
    -421        LIMIT_FETCH = "LIMIT"
    -422        TABLESAMPLE_WITH_METHOD = False
    -423        JOIN_HINTS = False
    -424        TABLE_HINTS = False
    -425        QUERY_HINTS = False
    -426        INDEX_ON = "ON TABLE"
    -427        EXTRACT_ALLOWS_QUOTES = False
    -428        NVL2_SUPPORTED = False
    -429        LAST_DAY_SUPPORTS_DATE_PART = False
    -430        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
    -431
    -432        EXPRESSIONS_WITHOUT_NESTED_CTES = {
    -433            exp.Insert,
    -434            exp.Select,
    -435            exp.Subquery,
    -436            exp.Union,
    -437        }
    -438
    -439        SUPPORTED_JSON_PATH_PARTS = {
    -440            exp.JSONPathKey,
    -441            exp.JSONPathRoot,
    -442            exp.JSONPathSubscript,
    -443            exp.JSONPathWildcard,
    -444        }
    -445
    -446        TYPE_MAPPING = {
    -447            **generator.Generator.TYPE_MAPPING,
    -448            exp.DataType.Type.BIT: "BOOLEAN",
    -449            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -450            exp.DataType.Type.TEXT: "STRING",
    -451            exp.DataType.Type.TIME: "TIMESTAMP",
    -452            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -453            exp.DataType.Type.VARBINARY: "BINARY",
    -454        }
    -455
    -456        TRANSFORMS = {
    -457            **generator.Generator.TRANSFORMS,
    -458            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -459            exp.Select: transforms.preprocess(
    -460                [
    -461                    transforms.eliminate_qualify,
    -462                    transforms.eliminate_distinct_on,
    -463                    transforms.unnest_to_explode,
    -464                ]
    -465            ),
    -466            exp.Property: _property_sql,
    -467            exp.AnyValue: rename_func("FIRST"),
    -468            exp.ApproxDistinct: approx_count_distinct_sql,
    -469            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
    -470            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
    -471            exp.ArrayConcat: rename_func("CONCAT"),
    -472            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    -473            exp.ArraySize: rename_func("SIZE"),
    -474            exp.ArraySort: _array_sort_sql,
    -475            exp.With: no_recursive_cte_sql,
    -476            exp.DateAdd: _add_date_sql,
    -477            exp.DateDiff: _date_diff_sql,
    -478            exp.DateStrToDate: datestrtodate_sql,
    -479            exp.DateSub: _add_date_sql,
    -480            exp.DateToDi: lambda self,
    -481            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
    -482            exp.DiToDate: lambda self,
    -483            e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
    -484            exp.FileFormatProperty: lambda self,
    -485            e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    -486            exp.FromBase64: rename_func("UNBASE64"),
    -487            exp.If: if_sql(),
    -488            exp.ILike: no_ilike_sql,
    -489            exp.IsNan: rename_func("ISNAN"),
    -490            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    -491            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    -492            exp.JSONFormat: _json_format_sql,
    -493            exp.Left: left_to_substring_sql,
    -494            exp.Map: var_map_sql,
    -495            exp.Max: max_or_greatest,
    -496            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
    -497            exp.Min: min_or_least,
    -498            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
    -499            exp.NotNullColumnConstraint: lambda self, e: (
    -500                "" if e.args.get("allow_null") else "NOT NULL"
    -501            ),
    -502            exp.VarMap: var_map_sql,
    -503            exp.Create: preprocess(
    -504                [
    -505                    remove_unique_constraints,
    -506                    ctas_with_tmp_tables_to_create_tmp_view,
    -507                    move_schema_columns_to_partitioned_by,
    -508                ]
    -509            ),
    -510            exp.Quantile: rename_func("PERCENTILE"),
    -511            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    -512            exp.RegexpExtract: regexp_extract_sql,
    -513            exp.RegexpReplace: regexp_replace_sql,
    -514            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    -515            exp.RegexpSplit: rename_func("SPLIT"),
    -516            exp.Right: right_to_substring_sql,
    -517            exp.SafeDivide: no_safe_divide_sql,
    -518            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    -519            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
    -520            exp.Split: lambda self,
    -521            e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    -522            exp.StrPosition: strposition_to_locate_sql,
    -523            exp.StrToDate: _str_to_date_sql,
    -524            exp.StrToTime: _str_to_time_sql,
    -525            exp.StrToUnix: _str_to_unix_sql,
    -526            exp.StructExtract: struct_extract_sql,
    -527            exp.TimeStrToDate: rename_func("TO_DATE"),
    -528            exp.TimeStrToTime: timestrtotime_sql,
    -529            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -530            exp.TimeToStr: _time_to_str,
    -531            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    -532            exp.ToBase64: rename_func("BASE64"),
    -533            exp.TsOrDiToDi: lambda self,
    -534            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    -535            exp.TsOrDsAdd: _add_date_sql,
    -536            exp.TsOrDsDiff: _date_diff_sql,
    -537            exp.TsOrDsToDate: _to_date_sql,
    -538            exp.TryCast: no_trycast_sql,
    -539            exp.UnixToStr: lambda self, e: self.func(
    -540                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
    -541            ),
    -542            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    -543            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    -544            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    -545            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    -546            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    -547            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    -548            exp.ClusteredColumnConstraint: lambda self,
    -549            e: f"({self.expressions(e, 'this', indent=False)})",
    -550            exp.NonClusteredColumnConstraint: lambda self,
    -551            e: f"({self.expressions(e, 'this', indent=False)})",
    -552            exp.NotForReplicationColumnConstraint: lambda self, e: "",
    -553            exp.OnProperty: lambda self, e: "",
    -554            exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY",
    -555        }
    -556
    -557        PROPERTIES_LOCATION = {
    -558            **generator.Generator.PROPERTIES_LOCATION,
    -559            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    -560            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    -561            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -562            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
    -563        }
    -564
    -565        def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
    -566            if isinstance(expression.this, exp.JSONPathWildcard):
    -567                self.unsupported("Unsupported wildcard in JSONPathKey expression")
    -568                return ""
    -569
    -570            return super()._jsonpathkey_sql(expression)
    -571
    -572        def parameter_sql(self, expression: exp.Parameter) -> str:
    -573            this = self.sql(expression, "this")
    -574            expression_sql = self.sql(expression, "expression")
    -575
    -576            parent = expression.parent
    -577            this = f"{this}:{expression_sql}" if expression_sql else this
    -578
    -579            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
    -580                # We need to produce SET key = value instead of SET ${key} = value
    -581                return this
    -582
    -583            return f"${{{this}}}"
    -584
    -585        def schema_sql(self, expression: exp.Schema) -> str:
    -586            for ordered in expression.find_all(exp.Ordered):
    -587                if ordered.args.get("desc") is False:
    -588                    ordered.set("desc", None)
    -589
    -590            return super().schema_sql(expression)
    -591
    -592        def constraint_sql(self, expression: exp.Constraint) -> str:
    -593            for prop in list(expression.find_all(exp.Properties)):
    -594                prop.pop()
    -595
    -596            this = self.sql(expression, "this")
    -597            expressions = self.expressions(expression, sep=" ", flat=True)
    -598            return f"CONSTRAINT {this} {expressions}"
    -599
    -600        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
    -601            serde_props = self.sql(expression, "serde_properties")
    -602            serde_props = f" {serde_props}" if serde_props else ""
    -603            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
    -604
    -605        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    -606            return self.func(
    -607                "COLLECT_LIST",
    -608                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    -609            )
    -610
    -611        def with_properties(self, properties: exp.Properties) -> str:
    -612            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
    -613
    -614        def datatype_sql(self, expression: exp.DataType) -> str:
    -615            if (
    -616                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    -617                and not expression.expressions
    -618            ):
    -619                expression = exp.DataType.build("text")
    -620            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
    -621                expression.set("this", exp.DataType.Type.VARCHAR)
    -622            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    -623                expression = exp.DataType.build(expression.this)
    -624            elif expression.is_type("float"):
    -625                size_expression = expression.find(exp.DataTypeParam)
    -626                if size_expression:
    -627                    size = int(size_expression.name)
    -628                    expression = (
    -629                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
    -630                    )
    -631
    -632            return super().datatype_sql(expression)
    -633
    -634        def version_sql(self, expression: exp.Version) -> str:
    -635            sql = super().version_sql(expression)
    -636            return sql.replace("FOR ", "", 1)
    +            
    421    class Generator(generator.Generator):
    +422        LIMIT_FETCH = "LIMIT"
    +423        TABLESAMPLE_WITH_METHOD = False
    +424        JOIN_HINTS = False
    +425        TABLE_HINTS = False
    +426        QUERY_HINTS = False
    +427        INDEX_ON = "ON TABLE"
    +428        EXTRACT_ALLOWS_QUOTES = False
    +429        NVL2_SUPPORTED = False
    +430        LAST_DAY_SUPPORTS_DATE_PART = False
    +431        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
    +432
    +433        EXPRESSIONS_WITHOUT_NESTED_CTES = {
    +434            exp.Insert,
    +435            exp.Select,
    +436            exp.Subquery,
    +437            exp.Union,
    +438        }
    +439
    +440        SUPPORTED_JSON_PATH_PARTS = {
    +441            exp.JSONPathKey,
    +442            exp.JSONPathRoot,
    +443            exp.JSONPathSubscript,
    +444            exp.JSONPathWildcard,
    +445        }
    +446
    +447        TYPE_MAPPING = {
    +448            **generator.Generator.TYPE_MAPPING,
    +449            exp.DataType.Type.BIT: "BOOLEAN",
    +450            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +451            exp.DataType.Type.TEXT: "STRING",
    +452            exp.DataType.Type.TIME: "TIMESTAMP",
    +453            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +454            exp.DataType.Type.VARBINARY: "BINARY",
    +455        }
    +456
    +457        TRANSFORMS = {
    +458            **generator.Generator.TRANSFORMS,
    +459            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +460            exp.Select: transforms.preprocess(
    +461                [
    +462                    transforms.eliminate_qualify,
    +463                    transforms.eliminate_distinct_on,
    +464                    transforms.unnest_to_explode,
    +465                ]
    +466            ),
    +467            exp.Property: _property_sql,
    +468            exp.AnyValue: rename_func("FIRST"),
    +469            exp.ApproxDistinct: approx_count_distinct_sql,
    +470            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
    +471            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
    +472            exp.ArrayConcat: rename_func("CONCAT"),
    +473            exp.ArrayJoin: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
    +474            exp.ArraySize: rename_func("SIZE"),
    +475            exp.ArraySort: _array_sort_sql,
    +476            exp.With: no_recursive_cte_sql,
    +477            exp.DateAdd: _add_date_sql,
    +478            exp.DateDiff: _date_diff_sql,
    +479            exp.DateStrToDate: datestrtodate_sql,
    +480            exp.DateSub: _add_date_sql,
    +481            exp.DateToDi: lambda self,
    +482            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Hive.DATEINT_FORMAT}) AS INT)",
    +483            exp.DiToDate: lambda self,
    +484            e: f"TO_DATE(CAST({self.sql(e, 'this')} AS STRING), {Hive.DATEINT_FORMAT})",
    +485            exp.FileFormatProperty: lambda self,
    +486            e: f"STORED AS {self.sql(e, 'this') if isinstance(e.this, exp.InputOutputFormat) else e.name.upper()}",
    +487            exp.FromBase64: rename_func("UNBASE64"),
    +488            exp.If: if_sql(),
    +489            exp.ILike: no_ilike_sql,
    +490            exp.IsNan: rename_func("ISNAN"),
    +491            exp.JSONExtract: rename_func("GET_JSON_OBJECT"),
    +492            exp.JSONExtractScalar: rename_func("GET_JSON_OBJECT"),
    +493            exp.JSONFormat: _json_format_sql,
    +494            exp.Left: left_to_substring_sql,
    +495            exp.Map: var_map_sql,
    +496            exp.Max: max_or_greatest,
    +497            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
    +498            exp.Min: min_or_least,
    +499            exp.MonthsBetween: lambda self, e: self.func("MONTHS_BETWEEN", e.this, e.expression),
    +500            exp.NotNullColumnConstraint: lambda self, e: (
    +501                "" if e.args.get("allow_null") else "NOT NULL"
    +502            ),
    +503            exp.VarMap: var_map_sql,
    +504            exp.Create: preprocess(
    +505                [
    +506                    remove_unique_constraints,
    +507                    ctas_with_tmp_tables_to_create_tmp_view,
    +508                    move_schema_columns_to_partitioned_by,
    +509                ]
    +510            ),
    +511            exp.Quantile: rename_func("PERCENTILE"),
    +512            exp.ApproxQuantile: rename_func("PERCENTILE_APPROX"),
    +513            exp.RegexpExtract: regexp_extract_sql,
    +514            exp.RegexpReplace: regexp_replace_sql,
    +515            exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
    +516            exp.RegexpSplit: rename_func("SPLIT"),
    +517            exp.Right: right_to_substring_sql,
    +518            exp.SafeDivide: no_safe_divide_sql,
    +519            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    +520            exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
    +521            exp.Split: lambda self,
    +522            e: f"SPLIT({self.sql(e, 'this')}, CONCAT('\\\\Q', {self.sql(e, 'expression')}))",
    +523            exp.StrPosition: strposition_to_locate_sql,
    +524            exp.StrToDate: _str_to_date_sql,
    +525            exp.StrToTime: _str_to_time_sql,
    +526            exp.StrToUnix: _str_to_unix_sql,
    +527            exp.StructExtract: struct_extract_sql,
    +528            exp.TimeStrToDate: rename_func("TO_DATE"),
    +529            exp.TimeStrToTime: timestrtotime_sql,
    +530            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +531            exp.TimeToStr: _time_to_str,
    +532            exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
    +533            exp.ToBase64: rename_func("BASE64"),
    +534            exp.TsOrDiToDi: lambda self,
    +535            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS STRING), '-', ''), 1, 8) AS INT)",
    +536            exp.TsOrDsAdd: _add_date_sql,
    +537            exp.TsOrDsDiff: _date_diff_sql,
    +538            exp.TsOrDsToDate: _to_date_sql,
    +539            exp.TryCast: no_trycast_sql,
    +540            exp.UnixToStr: lambda self, e: self.func(
    +541                "FROM_UNIXTIME", e.this, time_format("hive")(self, e)
    +542            ),
    +543            exp.UnixToTime: rename_func("FROM_UNIXTIME"),
    +544            exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
    +545            exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
    +546            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
    +547            exp.NumberToStr: rename_func("FORMAT_NUMBER"),
    +548            exp.National: lambda self, e: self.national_sql(e, prefix=""),
    +549            exp.ClusteredColumnConstraint: lambda self,
    +550            e: f"({self.expressions(e, 'this', indent=False)})",
    +551            exp.NonClusteredColumnConstraint: lambda self,
    +552            e: f"({self.expressions(e, 'this', indent=False)})",
    +553            exp.NotForReplicationColumnConstraint: lambda self, e: "",
    +554            exp.OnProperty: lambda self, e: "",
    +555            exp.PrimaryKeyColumnConstraint: lambda self, e: "PRIMARY KEY",
    +556        }
    +557
    +558        PROPERTIES_LOCATION = {
    +559            **generator.Generator.PROPERTIES_LOCATION,
    +560            exp.FileFormatProperty: exp.Properties.Location.POST_SCHEMA,
    +561            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    +562            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +563            exp.WithDataProperty: exp.Properties.Location.UNSUPPORTED,
    +564        }
    +565
    +566        def _jsonpathkey_sql(self, expression: exp.JSONPathKey) -> str:
    +567            if isinstance(expression.this, exp.JSONPathWildcard):
    +568                self.unsupported("Unsupported wildcard in JSONPathKey expression")
    +569                return ""
    +570
    +571            return super()._jsonpathkey_sql(expression)
    +572
    +573        def parameter_sql(self, expression: exp.Parameter) -> str:
    +574            this = self.sql(expression, "this")
    +575            expression_sql = self.sql(expression, "expression")
    +576
    +577            parent = expression.parent
    +578            this = f"{this}:{expression_sql}" if expression_sql else this
    +579
    +580            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
    +581                # We need to produce SET key = value instead of SET ${key} = value
    +582                return this
    +583
    +584            return f"${{{this}}}"
    +585
    +586        def schema_sql(self, expression: exp.Schema) -> str:
    +587            for ordered in expression.find_all(exp.Ordered):
    +588                if ordered.args.get("desc") is False:
    +589                    ordered.set("desc", None)
    +590
    +591            return super().schema_sql(expression)
    +592
    +593        def constraint_sql(self, expression: exp.Constraint) -> str:
    +594            for prop in list(expression.find_all(exp.Properties)):
    +595                prop.pop()
    +596
    +597            this = self.sql(expression, "this")
    +598            expressions = self.expressions(expression, sep=" ", flat=True)
    +599            return f"CONSTRAINT {this} {expressions}"
    +600
    +601        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
    +602            serde_props = self.sql(expression, "serde_properties")
    +603            serde_props = f" {serde_props}" if serde_props else ""
    +604            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
    +605
    +606        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    +607            return self.func(
    +608                "COLLECT_LIST",
    +609                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    +610            )
    +611
    +612        def with_properties(self, properties: exp.Properties) -> str:
    +613            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
    +614
    +615        def datatype_sql(self, expression: exp.DataType) -> str:
    +616            if (
    +617                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    +618                and not expression.expressions
    +619            ):
    +620                expression = exp.DataType.build("text")
    +621            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
    +622                expression.set("this", exp.DataType.Type.VARCHAR)
    +623            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    +624                expression = exp.DataType.build(expression.this)
    +625            elif expression.is_type("float"):
    +626                size_expression = expression.find(exp.DataTypeParam)
    +627                if size_expression:
    +628                    size = int(size_expression.name)
    +629                    expression = (
    +630                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
    +631                    )
    +632
    +633            return super().datatype_sql(expression)
    +634
    +635        def version_sql(self, expression: exp.Version) -> str:
    +636            sql = super().version_sql(expression)
    +637            return sql.replace("FOR ", "", 1)
     
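Similarly, a small sketch of the Generator TRANSFORMS above in action when emitting Hive SQL (again assuming sqlglot ~21.x; exact whitespace and quoting of the output is not asserted here):

import sqlglot

# exp.ArrayAgg is rendered via arrayagg_sql as COLLECT_LIST(...).
print(sqlglot.transpile("SELECT ARRAY_AGG(x) FROM t", read="duckdb", write="hive")[0])

# exp.JSONExtractScalar maps to GET_JSON_OBJECT, with JSON paths restricted to
# the SUPPORTED_JSON_PATH_PARTS listed above.
print(sqlglot.transpile("SELECT JSON_EXTRACT_SCALAR(j, '$.name') FROM t", read="bigquery", write="hive")[0])

# datatype_sql widens FLOAT(p) with p > 32 to DOUBLE.
print(sqlglot.transpile("SELECT CAST(x AS FLOAT(64)) FROM t", read="hive", write="hive")[0])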
@@ -2752,7 +2770,7 @@ Default: True
@@ -2765,7 +2783,7 @@ Default: True
@@ -2824,18 +2842,18 @@ Default: True
    -
    572        def parameter_sql(self, expression: exp.Parameter) -> str:
    -573            this = self.sql(expression, "this")
    -574            expression_sql = self.sql(expression, "expression")
    -575
    -576            parent = expression.parent
    -577            this = f"{this}:{expression_sql}" if expression_sql else this
    -578
    -579            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
    -580                # We need to produce SET key = value instead of SET ${key} = value
    -581                return this
    -582
    -583            return f"${{{this}}}"
    +            
    573        def parameter_sql(self, expression: exp.Parameter) -> str:
    +574            this = self.sql(expression, "this")
    +575            expression_sql = self.sql(expression, "expression")
    +576
    +577            parent = expression.parent
    +578            this = f"{this}:{expression_sql}" if expression_sql else this
    +579
    +580            if isinstance(parent, exp.EQ) and isinstance(parent.parent, exp.SetItem):
    +581                # We need to produce SET key = value instead of SET ${key} = value
    +582                return this
    +583
    +584            return f"${{{this}}}"
     
    @@ -2853,12 +2871,12 @@ Default: True
    -
    585        def schema_sql(self, expression: exp.Schema) -> str:
    -586            for ordered in expression.find_all(exp.Ordered):
    -587                if ordered.args.get("desc") is False:
    -588                    ordered.set("desc", None)
    -589
    -590            return super().schema_sql(expression)
    +            
    586        def schema_sql(self, expression: exp.Schema) -> str:
    +587            for ordered in expression.find_all(exp.Ordered):
    +588                if ordered.args.get("desc") is False:
    +589                    ordered.set("desc", None)
    +590
    +591            return super().schema_sql(expression)
     
    @@ -2876,13 +2894,13 @@ Default: True
    -
    592        def constraint_sql(self, expression: exp.Constraint) -> str:
    -593            for prop in list(expression.find_all(exp.Properties)):
    -594                prop.pop()
    -595
    -596            this = self.sql(expression, "this")
    -597            expressions = self.expressions(expression, sep=" ", flat=True)
    -598            return f"CONSTRAINT {this} {expressions}"
    +            
    593        def constraint_sql(self, expression: exp.Constraint) -> str:
    +594            for prop in list(expression.find_all(exp.Properties)):
    +595                prop.pop()
    +596
    +597            this = self.sql(expression, "this")
    +598            expressions = self.expressions(expression, sep=" ", flat=True)
    +599            return f"CONSTRAINT {this} {expressions}"
     
    @@ -2900,10 +2918,10 @@ Default: True
    -
    600        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
    -601            serde_props = self.sql(expression, "serde_properties")
    -602            serde_props = f" {serde_props}" if serde_props else ""
    -603            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
    +            
    601        def rowformatserdeproperty_sql(self, expression: exp.RowFormatSerdeProperty) -> str:
    +602            serde_props = self.sql(expression, "serde_properties")
    +603            serde_props = f" {serde_props}" if serde_props else ""
    +604            return f"ROW FORMAT SERDE {self.sql(expression, 'this')}{serde_props}"
     
    @@ -2921,11 +2939,11 @@ Default: True
    -
    605        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    -606            return self.func(
    -607                "COLLECT_LIST",
    -608                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    -609            )
    +            
    606        def arrayagg_sql(self, expression: exp.ArrayAgg) -> str:
    +607            return self.func(
    +608                "COLLECT_LIST",
    +609                expression.this.this if isinstance(expression.this, exp.Order) else expression.this,
    +610            )
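
A minimal usage sketch (not part of the patch) for arrayagg_sql above, assuming a sqlglot release that includes this Hive generator:

    import sqlglot

    # ARRAY_AGG is expected to be emitted as Hive's COLLECT_LIST.
    print(sqlglot.transpile("SELECT ARRAY_AGG(x) FROM t", write="hive")[0])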
     
    @@ -2943,8 +2961,8 @@ Default: True
    -
    611        def with_properties(self, properties: exp.Properties) -> str:
    -612            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
    +            
    612        def with_properties(self, properties: exp.Properties) -> str:
    +613            return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
     
    @@ -2962,25 +2980,25 @@ Default: True
    -
    614        def datatype_sql(self, expression: exp.DataType) -> str:
    -615            if (
    -616                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    -617                and not expression.expressions
    -618            ):
    -619                expression = exp.DataType.build("text")
    -620            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
    -621                expression.set("this", exp.DataType.Type.VARCHAR)
    -622            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    -623                expression = exp.DataType.build(expression.this)
    -624            elif expression.is_type("float"):
    -625                size_expression = expression.find(exp.DataTypeParam)
    -626                if size_expression:
    -627                    size = int(size_expression.name)
    -628                    expression = (
    -629                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
    -630                    )
    -631
    -632            return super().datatype_sql(expression)
    +            
    615        def datatype_sql(self, expression: exp.DataType) -> str:
    +616            if (
    +617                expression.this in (exp.DataType.Type.VARCHAR, exp.DataType.Type.NVARCHAR)
    +618                and not expression.expressions
    +619            ):
    +620                expression = exp.DataType.build("text")
    +621            elif expression.is_type(exp.DataType.Type.TEXT) and expression.expressions:
    +622                expression.set("this", exp.DataType.Type.VARCHAR)
    +623            elif expression.this in exp.DataType.TEMPORAL_TYPES:
    +624                expression = exp.DataType.build(expression.this)
    +625            elif expression.is_type("float"):
    +626                size_expression = expression.find(exp.DataTypeParam)
    +627                if size_expression:
    +628                    size = int(size_expression.name)
    +629                    expression = (
    +630                        exp.DataType.build("float") if size <= 32 else exp.DataType.build("double")
    +631                    )
    +632
    +633            return super().datatype_sql(expression)
     
    @@ -2998,9 +3016,9 @@ Default: True
    -
    634        def version_sql(self, expression: exp.Version) -> str:
    -635            sql = super().version_sql(expression)
    -636            return sql.replace("FOR ", "", 1)
    +            
    635        def version_sql(self, expression: exp.Version) -> str:
    +636            sql = super().version_sql(expression)
    +637            return sql.replace("FOR ", "", 1)
     
diff --git a/docs/sqlglot/dialects/mysql.html b/docs/sqlglot/dialects/mysql.html
index 63b58d4..1a62f09 100644
--- a/docs/sqlglot/dialects/mysql.html
+++ b/docs/sqlglot/dialects/mysql.html
@@ -141,6 +141,9 @@
  • STRING_ALIASES
  • +
  • + VALUES_FOLLOWED_BY_PAREN +
  • SHOW_TRIE
  •
@@ -776,400 +779,401 @@
    445 446 LOG_DEFAULTS_TO_LN = True 447 STRING_ALIASES = True -448 -449 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: -450 this = self._parse_id_var() -451 if not self._match(TokenType.L_PAREN): -452 return this -453 -454 expression = self._parse_number() -455 self._match_r_paren() -456 return self.expression(exp.ColumnPrefix, this=this, expression=expression) -457 -458 def _parse_index_constraint( -459 self, kind: t.Optional[str] = None -460 ) -> exp.IndexColumnConstraint: -461 if kind: -462 self._match_texts(("INDEX", "KEY")) -463 -464 this = self._parse_id_var(any_token=False) -465 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text -466 schema = self._parse_schema() -467 -468 options = [] -469 while True: -470 if self._match_text_seq("KEY_BLOCK_SIZE"): -471 self._match(TokenType.EQ) -472 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) -473 elif self._match(TokenType.USING): -474 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) -475 elif self._match_text_seq("WITH", "PARSER"): -476 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) -477 elif self._match(TokenType.COMMENT): -478 opt = exp.IndexConstraintOption(comment=self._parse_string()) -479 elif self._match_text_seq("VISIBLE"): -480 opt = exp.IndexConstraintOption(visible=True) -481 elif self._match_text_seq("INVISIBLE"): -482 opt = exp.IndexConstraintOption(visible=False) -483 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -484 self._match(TokenType.EQ) -485 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -486 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -487 self._match(TokenType.EQ) -488 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -489 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): -490 self._match(TokenType.EQ) -491 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) -492 else: -493 opt = None -494 -495 if not opt: -496 break -497 -498 options.append(opt) -499 -500 return self.expression( -501 exp.IndexColumnConstraint, -502 this=this, -503 schema=schema, -504 kind=kind, -505 index_type=index_type, -506 options=options, -507 ) -508 -509 def _parse_show_mysql( -510 self, -511 this: str, -512 target: bool | str = False, -513 full: t.Optional[bool] = None, -514 global_: t.Optional[bool] = None, -515 ) -> exp.Show: -516 if target: -517 if isinstance(target, str): -518 self._match_text_seq(target) -519 target_id = self._parse_id_var() -520 else: -521 target_id = None -522 -523 log = self._parse_string() if self._match_text_seq("IN") else None -524 -525 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): -526 position = self._parse_number() if self._match_text_seq("FROM") else None -527 db = None -528 else: -529 position = None -530 db = None -531 -532 if self._match(TokenType.FROM): -533 db = self._parse_id_var() -534 elif self._match(TokenType.DOT): -535 db = target_id -536 target_id = self._parse_id_var() -537 -538 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None -539 -540 like = self._parse_string() if self._match_text_seq("LIKE") else None -541 where = self._parse_where() -542 -543 if this == "PROFILE": -544 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) -545 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None -546 offset = self._parse_number() if self._match_text_seq("OFFSET") else None -547 limit = self._parse_number() if 
self._match_text_seq("LIMIT") else None -548 else: -549 types, query = None, None -550 offset, limit = self._parse_oldstyle_limit() -551 -552 mutex = True if self._match_text_seq("MUTEX") else None -553 mutex = False if self._match_text_seq("STATUS") else mutex -554 -555 return self.expression( -556 exp.Show, -557 this=this, -558 target=target_id, -559 full=full, -560 log=log, -561 position=position, -562 db=db, -563 channel=channel, -564 like=like, -565 where=where, -566 types=types, -567 query=query, -568 offset=offset, -569 limit=limit, -570 mutex=mutex, -571 **{"global": global_}, # type: ignore -572 ) -573 -574 def _parse_oldstyle_limit( -575 self, -576 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: -577 limit = None -578 offset = None -579 if self._match_text_seq("LIMIT"): -580 parts = self._parse_csv(self._parse_number) -581 if len(parts) == 1: -582 limit = parts[0] -583 elif len(parts) == 2: -584 limit = parts[1] -585 offset = parts[0] -586 -587 return offset, limit -588 -589 def _parse_set_item_charset(self, kind: str) -> exp.Expression: -590 this = self._parse_string() or self._parse_id_var() -591 return self.expression(exp.SetItem, this=this, kind=kind) -592 -593 def _parse_set_item_names(self) -> exp.Expression: -594 charset = self._parse_string() or self._parse_id_var() -595 if self._match_text_seq("COLLATE"): -596 collate = self._parse_string() or self._parse_id_var() -597 else: -598 collate = None -599 -600 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") -601 -602 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: -603 # mysql binary is special and can work anywhere, even in order by operations -604 # it operates like a no paren func -605 if self._match(TokenType.BINARY, advance=False): -606 data_type = self._parse_types(check_func=True, allow_identifiers=False) -607 -608 if isinstance(data_type, exp.DataType): -609 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) -610 -611 return super()._parse_type(parse_interval=parse_interval) -612 -613 def _parse_chr(self) -> t.Optional[exp.Expression]: -614 expressions = self._parse_csv(self._parse_conjunction) -615 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} -616 -617 if len(expressions) > 1: -618 kwargs["expressions"] = expressions[1:] -619 -620 if self._match(TokenType.USING): -621 kwargs["charset"] = self._parse_var() -622 -623 return self.expression(exp.Chr, **kwargs) -624 -625 class Generator(generator.Generator): -626 LOCKING_READS_SUPPORTED = True -627 NULL_ORDERING_SUPPORTED = None -628 JOIN_HINTS = False -629 TABLE_HINTS = True -630 DUPLICATE_KEY_UPDATE_WITH_SET = False -631 QUERY_HINT_SEP = " " -632 VALUES_AS_TABLE = False -633 NVL2_SUPPORTED = False -634 LAST_DAY_SUPPORTS_DATE_PART = False -635 JSON_TYPE_REQUIRED_FOR_EXTRACTION = True -636 JSON_PATH_BRACKETED_KEY_SUPPORTED = False -637 JSON_KEY_VALUE_PAIR_SEP = "," -638 -639 TRANSFORMS = { -640 **generator.Generator.TRANSFORMS, -641 exp.CurrentDate: no_paren_current_date_sql, -642 exp.DateDiff: _remove_ts_or_ds_to_date( -643 lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression") -644 ), -645 exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")), -646 exp.DateStrToDate: datestrtodate_sql, -647 exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")), -648 exp.DateTrunc: _date_trunc_sql, -649 exp.Day: _remove_ts_or_ds_to_date(), -650 exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")), -651 
exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")), -652 exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")), -653 exp.GroupConcat: lambda self, -654 e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", -655 exp.ILike: no_ilike_sql, -656 exp.JSONExtractScalar: arrow_json_extract_sql, -657 exp.Max: max_or_greatest, -658 exp.Min: min_or_least, -659 exp.Month: _remove_ts_or_ds_to_date(), -660 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), -661 exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}", -662 exp.ParseJSON: lambda self, e: self.sql(e, "this"), -663 exp.Pivot: no_pivot_sql, -664 exp.Select: transforms.preprocess( -665 [ -666 transforms.eliminate_distinct_on, -667 transforms.eliminate_semi_and_anti_joins, -668 transforms.eliminate_qualify, -669 transforms.eliminate_full_outer_join, -670 ] -671 ), -672 exp.StrPosition: strposition_to_locate_sql, -673 exp.StrToDate: _str_to_date_sql, -674 exp.StrToTime: _str_to_date_sql, -675 exp.Stuff: rename_func("INSERT"), -676 exp.TableSample: no_tablesample_sql, -677 exp.TimeFromParts: rename_func("MAKETIME"), -678 exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"), -679 exp.TimestampDiff: lambda self, e: self.func( -680 "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this -681 ), -682 exp.TimestampSub: date_add_interval_sql("DATE", "SUB"), -683 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -684 exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)), -685 exp.TimeToStr: _remove_ts_or_ds_to_date( -686 lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)) -687 ), -688 exp.Trim: _trim_sql, -689 exp.TryCast: no_trycast_sql, -690 exp.TsOrDsAdd: _date_add_sql("ADD"), -691 exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), -692 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, -693 exp.Week: _remove_ts_or_ds_to_date(), -694 exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")), -695 exp.Year: _remove_ts_or_ds_to_date(), -696 } -697 -698 UNSIGNED_TYPE_MAPPING = { -699 exp.DataType.Type.UBIGINT: "BIGINT", -700 exp.DataType.Type.UINT: "INT", -701 exp.DataType.Type.UMEDIUMINT: "MEDIUMINT", -702 exp.DataType.Type.USMALLINT: "SMALLINT", -703 exp.DataType.Type.UTINYINT: "TINYINT", -704 exp.DataType.Type.UDECIMAL: "DECIMAL", -705 } -706 -707 TIMESTAMP_TYPE_MAPPING = { -708 exp.DataType.Type.TIMESTAMP: "DATETIME", -709 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -710 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -711 } -712 -713 TYPE_MAPPING = { -714 **generator.Generator.TYPE_MAPPING, -715 **UNSIGNED_TYPE_MAPPING, -716 **TIMESTAMP_TYPE_MAPPING, -717 } -718 -719 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) -720 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) -721 TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT) -722 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) -723 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) -724 TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB) -725 -726 PROPERTIES_LOCATION = { -727 **generator.Generator.PROPERTIES_LOCATION, -728 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, -729 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -730 } -731 -732 LIMIT_FETCH = "LIMIT" -733 -734 LIMIT_ONLY_LITERALS = True -735 -736 # MySQL doesn't support many datatypes in cast. 
-737 # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast -738 CAST_MAPPING = { -739 exp.DataType.Type.BIGINT: "SIGNED", -740 exp.DataType.Type.BOOLEAN: "SIGNED", -741 exp.DataType.Type.INT: "SIGNED", -742 exp.DataType.Type.TEXT: "CHAR", -743 exp.DataType.Type.UBIGINT: "UNSIGNED", -744 exp.DataType.Type.VARCHAR: "CHAR", -745 } -746 -747 TIMESTAMP_FUNC_TYPES = { -748 exp.DataType.Type.TIMESTAMPTZ, -749 exp.DataType.Type.TIMESTAMPLTZ, -750 } -751 -752 def datatype_sql(self, expression: exp.DataType) -> str: -753 # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html -754 result = super().datatype_sql(expression) -755 if expression.this in self.UNSIGNED_TYPE_MAPPING: -756 result = f"{result} UNSIGNED" -757 return result -758 -759 def xor_sql(self, expression: exp.Xor) -> str: -760 if expression.expressions: -761 return self.expressions(expression, sep=" XOR ") -762 return super().xor_sql(expression) -763 -764 def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str: -765 return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})" -766 -767 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: -768 if expression.to.this in self.TIMESTAMP_FUNC_TYPES: -769 return self.func("TIMESTAMP", expression.this) -770 -771 to = self.CAST_MAPPING.get(expression.to.this) -772 -773 if to: -774 expression.to.set("this", to) -775 return super().cast_sql(expression) -776 -777 def show_sql(self, expression: exp.Show) -> str: -778 this = f" {expression.name}" -779 full = " FULL" if expression.args.get("full") else "" -780 global_ = " GLOBAL" if expression.args.get("global") else "" -781 -782 target = self.sql(expression, "target") -783 target = f" {target}" if target else "" -784 if expression.name in ("COLUMNS", "INDEX"): -785 target = f" FROM{target}" -786 elif expression.name == "GRANTS": -787 target = f" FOR{target}" -788 -789 db = self._prefixed_sql("FROM", expression, "db") -790 -791 like = self._prefixed_sql("LIKE", expression, "like") -792 where = self.sql(expression, "where") -793 -794 types = self.expressions(expression, key="types") -795 types = f" {types}" if types else types -796 query = self._prefixed_sql("FOR QUERY", expression, "query") -797 -798 if expression.name == "PROFILE": -799 offset = self._prefixed_sql("OFFSET", expression, "offset") -800 limit = self._prefixed_sql("LIMIT", expression, "limit") -801 else: -802 offset = "" -803 limit = self._oldstyle_limit_sql(expression) -804 -805 log = self._prefixed_sql("IN", expression, "log") -806 position = self._prefixed_sql("FROM", expression, "position") -807 -808 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") -809 -810 if expression.name == "ENGINE": -811 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" -812 else: -813 mutex_or_status = "" -814 -815 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" -816 -817 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: -818 dtype = self.sql(expression, "dtype") -819 if not dtype: -820 return super().altercolumn_sql(expression) -821 -822 this = self.sql(expression, "this") -823 return f"MODIFY COLUMN {this} {dtype}" -824 -825 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: -826 sql = self.sql(expression, arg) -827 return f" {prefix} {sql}" if sql else "" -828 -829 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: -830 
limit = self.sql(expression, "limit") -831 offset = self.sql(expression, "offset") -832 if limit: -833 limit_offset = f"{offset}, {limit}" if offset else limit -834 return f" LIMIT {limit_offset}" -835 return "" -836 -837 def chr_sql(self, expression: exp.Chr) -> str: -838 this = self.expressions(sqls=[expression.this] + expression.expressions) -839 charset = expression.args.get("charset") -840 using = f" USING {self.sql(charset)}" if charset else "" -841 return f"CHAR({this}{using})" +448 VALUES_FOLLOWED_BY_PAREN = False +449 +450 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: +451 this = self._parse_id_var() +452 if not self._match(TokenType.L_PAREN): +453 return this +454 +455 expression = self._parse_number() +456 self._match_r_paren() +457 return self.expression(exp.ColumnPrefix, this=this, expression=expression) +458 +459 def _parse_index_constraint( +460 self, kind: t.Optional[str] = None +461 ) -> exp.IndexColumnConstraint: +462 if kind: +463 self._match_texts(("INDEX", "KEY")) +464 +465 this = self._parse_id_var(any_token=False) +466 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text +467 schema = self._parse_schema() +468 +469 options = [] +470 while True: +471 if self._match_text_seq("KEY_BLOCK_SIZE"): +472 self._match(TokenType.EQ) +473 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) +474 elif self._match(TokenType.USING): +475 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) +476 elif self._match_text_seq("WITH", "PARSER"): +477 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) +478 elif self._match(TokenType.COMMENT): +479 opt = exp.IndexConstraintOption(comment=self._parse_string()) +480 elif self._match_text_seq("VISIBLE"): +481 opt = exp.IndexConstraintOption(visible=True) +482 elif self._match_text_seq("INVISIBLE"): +483 opt = exp.IndexConstraintOption(visible=False) +484 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +485 self._match(TokenType.EQ) +486 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +487 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +488 self._match(TokenType.EQ) +489 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +490 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): +491 self._match(TokenType.EQ) +492 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) +493 else: +494 opt = None +495 +496 if not opt: +497 break +498 +499 options.append(opt) +500 +501 return self.expression( +502 exp.IndexColumnConstraint, +503 this=this, +504 schema=schema, +505 kind=kind, +506 index_type=index_type, +507 options=options, +508 ) +509 +510 def _parse_show_mysql( +511 self, +512 this: str, +513 target: bool | str = False, +514 full: t.Optional[bool] = None, +515 global_: t.Optional[bool] = None, +516 ) -> exp.Show: +517 if target: +518 if isinstance(target, str): +519 self._match_text_seq(target) +520 target_id = self._parse_id_var() +521 else: +522 target_id = None +523 +524 log = self._parse_string() if self._match_text_seq("IN") else None +525 +526 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): +527 position = self._parse_number() if self._match_text_seq("FROM") else None +528 db = None +529 else: +530 position = None +531 db = None +532 +533 if self._match(TokenType.FROM): +534 db = self._parse_id_var() +535 elif self._match(TokenType.DOT): +536 db = target_id +537 target_id = self._parse_id_var() +538 +539 channel = self._parse_id_var() if self._match_text_seq("FOR", 
"CHANNEL") else None +540 +541 like = self._parse_string() if self._match_text_seq("LIKE") else None +542 where = self._parse_where() +543 +544 if this == "PROFILE": +545 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) +546 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None +547 offset = self._parse_number() if self._match_text_seq("OFFSET") else None +548 limit = self._parse_number() if self._match_text_seq("LIMIT") else None +549 else: +550 types, query = None, None +551 offset, limit = self._parse_oldstyle_limit() +552 +553 mutex = True if self._match_text_seq("MUTEX") else None +554 mutex = False if self._match_text_seq("STATUS") else mutex +555 +556 return self.expression( +557 exp.Show, +558 this=this, +559 target=target_id, +560 full=full, +561 log=log, +562 position=position, +563 db=db, +564 channel=channel, +565 like=like, +566 where=where, +567 types=types, +568 query=query, +569 offset=offset, +570 limit=limit, +571 mutex=mutex, +572 **{"global": global_}, # type: ignore +573 ) +574 +575 def _parse_oldstyle_limit( +576 self, +577 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: +578 limit = None +579 offset = None +580 if self._match_text_seq("LIMIT"): +581 parts = self._parse_csv(self._parse_number) +582 if len(parts) == 1: +583 limit = parts[0] +584 elif len(parts) == 2: +585 limit = parts[1] +586 offset = parts[0] +587 +588 return offset, limit +589 +590 def _parse_set_item_charset(self, kind: str) -> exp.Expression: +591 this = self._parse_string() or self._parse_id_var() +592 return self.expression(exp.SetItem, this=this, kind=kind) +593 +594 def _parse_set_item_names(self) -> exp.Expression: +595 charset = self._parse_string() or self._parse_id_var() +596 if self._match_text_seq("COLLATE"): +597 collate = self._parse_string() or self._parse_id_var() +598 else: +599 collate = None +600 +601 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") +602 +603 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: +604 # mysql binary is special and can work anywhere, even in order by operations +605 # it operates like a no paren func +606 if self._match(TokenType.BINARY, advance=False): +607 data_type = self._parse_types(check_func=True, allow_identifiers=False) +608 +609 if isinstance(data_type, exp.DataType): +610 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) +611 +612 return super()._parse_type(parse_interval=parse_interval) +613 +614 def _parse_chr(self) -> t.Optional[exp.Expression]: +615 expressions = self._parse_csv(self._parse_conjunction) +616 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} +617 +618 if len(expressions) > 1: +619 kwargs["expressions"] = expressions[1:] +620 +621 if self._match(TokenType.USING): +622 kwargs["charset"] = self._parse_var() +623 +624 return self.expression(exp.Chr, **kwargs) +625 +626 class Generator(generator.Generator): +627 LOCKING_READS_SUPPORTED = True +628 NULL_ORDERING_SUPPORTED = None +629 JOIN_HINTS = False +630 TABLE_HINTS = True +631 DUPLICATE_KEY_UPDATE_WITH_SET = False +632 QUERY_HINT_SEP = " " +633 VALUES_AS_TABLE = False +634 NVL2_SUPPORTED = False +635 LAST_DAY_SUPPORTS_DATE_PART = False +636 JSON_TYPE_REQUIRED_FOR_EXTRACTION = True +637 JSON_PATH_BRACKETED_KEY_SUPPORTED = False +638 JSON_KEY_VALUE_PAIR_SEP = "," +639 +640 TRANSFORMS = { +641 **generator.Generator.TRANSFORMS, +642 exp.CurrentDate: no_paren_current_date_sql, +643 exp.DateDiff: 
_remove_ts_or_ds_to_date( +644 lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression") +645 ), +646 exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")), +647 exp.DateStrToDate: datestrtodate_sql, +648 exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")), +649 exp.DateTrunc: _date_trunc_sql, +650 exp.Day: _remove_ts_or_ds_to_date(), +651 exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")), +652 exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")), +653 exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")), +654 exp.GroupConcat: lambda self, +655 e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", +656 exp.ILike: no_ilike_sql, +657 exp.JSONExtractScalar: arrow_json_extract_sql, +658 exp.Max: max_or_greatest, +659 exp.Min: min_or_least, +660 exp.Month: _remove_ts_or_ds_to_date(), +661 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), +662 exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}", +663 exp.ParseJSON: lambda self, e: self.sql(e, "this"), +664 exp.Pivot: no_pivot_sql, +665 exp.Select: transforms.preprocess( +666 [ +667 transforms.eliminate_distinct_on, +668 transforms.eliminate_semi_and_anti_joins, +669 transforms.eliminate_qualify, +670 transforms.eliminate_full_outer_join, +671 ] +672 ), +673 exp.StrPosition: strposition_to_locate_sql, +674 exp.StrToDate: _str_to_date_sql, +675 exp.StrToTime: _str_to_date_sql, +676 exp.Stuff: rename_func("INSERT"), +677 exp.TableSample: no_tablesample_sql, +678 exp.TimeFromParts: rename_func("MAKETIME"), +679 exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"), +680 exp.TimestampDiff: lambda self, e: self.func( +681 "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this +682 ), +683 exp.TimestampSub: date_add_interval_sql("DATE", "SUB"), +684 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +685 exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)), +686 exp.TimeToStr: _remove_ts_or_ds_to_date( +687 lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)) +688 ), +689 exp.Trim: _trim_sql, +690 exp.TryCast: no_trycast_sql, +691 exp.TsOrDsAdd: _date_add_sql("ADD"), +692 exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), +693 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, +694 exp.Week: _remove_ts_or_ds_to_date(), +695 exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")), +696 exp.Year: _remove_ts_or_ds_to_date(), +697 } +698 +699 UNSIGNED_TYPE_MAPPING = { +700 exp.DataType.Type.UBIGINT: "BIGINT", +701 exp.DataType.Type.UINT: "INT", +702 exp.DataType.Type.UMEDIUMINT: "MEDIUMINT", +703 exp.DataType.Type.USMALLINT: "SMALLINT", +704 exp.DataType.Type.UTINYINT: "TINYINT", +705 exp.DataType.Type.UDECIMAL: "DECIMAL", +706 } +707 +708 TIMESTAMP_TYPE_MAPPING = { +709 exp.DataType.Type.TIMESTAMP: "DATETIME", +710 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +711 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +712 } +713 +714 TYPE_MAPPING = { +715 **generator.Generator.TYPE_MAPPING, +716 **UNSIGNED_TYPE_MAPPING, +717 **TIMESTAMP_TYPE_MAPPING, +718 } +719 +720 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) +721 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) +722 TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT) +723 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) +724 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) +725 TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB) +726 +727 PROPERTIES_LOCATION = { +728 **generator.Generator.PROPERTIES_LOCATION, +729 
exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, +730 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +731 } +732 +733 LIMIT_FETCH = "LIMIT" +734 +735 LIMIT_ONLY_LITERALS = True +736 +737 # MySQL doesn't support many datatypes in cast. +738 # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast +739 CAST_MAPPING = { +740 exp.DataType.Type.BIGINT: "SIGNED", +741 exp.DataType.Type.BOOLEAN: "SIGNED", +742 exp.DataType.Type.INT: "SIGNED", +743 exp.DataType.Type.TEXT: "CHAR", +744 exp.DataType.Type.UBIGINT: "UNSIGNED", +745 exp.DataType.Type.VARCHAR: "CHAR", +746 } +747 +748 TIMESTAMP_FUNC_TYPES = { +749 exp.DataType.Type.TIMESTAMPTZ, +750 exp.DataType.Type.TIMESTAMPLTZ, +751 } +752 +753 def datatype_sql(self, expression: exp.DataType) -> str: +754 # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html +755 result = super().datatype_sql(expression) +756 if expression.this in self.UNSIGNED_TYPE_MAPPING: +757 result = f"{result} UNSIGNED" +758 return result +759 +760 def xor_sql(self, expression: exp.Xor) -> str: +761 if expression.expressions: +762 return self.expressions(expression, sep=" XOR ") +763 return super().xor_sql(expression) +764 +765 def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str: +766 return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})" +767 +768 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: +769 if expression.to.this in self.TIMESTAMP_FUNC_TYPES: +770 return self.func("TIMESTAMP", expression.this) +771 +772 to = self.CAST_MAPPING.get(expression.to.this) +773 +774 if to: +775 expression.to.set("this", to) +776 return super().cast_sql(expression) +777 +778 def show_sql(self, expression: exp.Show) -> str: +779 this = f" {expression.name}" +780 full = " FULL" if expression.args.get("full") else "" +781 global_ = " GLOBAL" if expression.args.get("global") else "" +782 +783 target = self.sql(expression, "target") +784 target = f" {target}" if target else "" +785 if expression.name in ("COLUMNS", "INDEX"): +786 target = f" FROM{target}" +787 elif expression.name == "GRANTS": +788 target = f" FOR{target}" +789 +790 db = self._prefixed_sql("FROM", expression, "db") +791 +792 like = self._prefixed_sql("LIKE", expression, "like") +793 where = self.sql(expression, "where") +794 +795 types = self.expressions(expression, key="types") +796 types = f" {types}" if types else types +797 query = self._prefixed_sql("FOR QUERY", expression, "query") +798 +799 if expression.name == "PROFILE": +800 offset = self._prefixed_sql("OFFSET", expression, "offset") +801 limit = self._prefixed_sql("LIMIT", expression, "limit") +802 else: +803 offset = "" +804 limit = self._oldstyle_limit_sql(expression) +805 +806 log = self._prefixed_sql("IN", expression, "log") +807 position = self._prefixed_sql("FROM", expression, "position") +808 +809 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") +810 +811 if expression.name == "ENGINE": +812 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" +813 else: +814 mutex_or_status = "" +815 +816 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" +817 +818 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: +819 dtype = self.sql(expression, "dtype") +820 if not dtype: +821 return super().altercolumn_sql(expression) +822 +823 this = self.sql(expression, "this") +824 return f"MODIFY COLUMN {this} 
{dtype}" +825 +826 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: +827 sql = self.sql(expression, arg) +828 return f" {prefix} {sql}" if sql else "" +829 +830 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: +831 limit = self.sql(expression, "limit") +832 offset = self.sql(expression, "offset") +833 if limit: +834 limit_offset = f"{offset}, {limit}" if offset else limit +835 return f" LIMIT {limit_offset}" +836 return "" +837 +838 def chr_sql(self, expression: exp.Chr) -> str: +839 this = self.expressions(sqls=[expression.this] + expression.expressions) +840 charset = expression.args.get("charset") +841 using = f" USING {self.sql(charset)}" if charset else "" +842 return f"CHAR({this}{using})"
    @@ -1177,7 +1181,7 @@
TIME_SPECIFIERS =
-{'i', 'p', 'H', 'k', 'r', 's', 'S', 'l', 'f', 'I', 'h', 'T'}
+{'l', 'i', 'H', 'h', 'I', 'f', 'T', 'S', 'k', 's', 'r', 'p'}
    @@ -1495,400 +1499,401 @@ 446 447 LOG_DEFAULTS_TO_LN = True 448 STRING_ALIASES = True -449 -450 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: -451 this = self._parse_id_var() -452 if not self._match(TokenType.L_PAREN): -453 return this -454 -455 expression = self._parse_number() -456 self._match_r_paren() -457 return self.expression(exp.ColumnPrefix, this=this, expression=expression) -458 -459 def _parse_index_constraint( -460 self, kind: t.Optional[str] = None -461 ) -> exp.IndexColumnConstraint: -462 if kind: -463 self._match_texts(("INDEX", "KEY")) -464 -465 this = self._parse_id_var(any_token=False) -466 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text -467 schema = self._parse_schema() -468 -469 options = [] -470 while True: -471 if self._match_text_seq("KEY_BLOCK_SIZE"): -472 self._match(TokenType.EQ) -473 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) -474 elif self._match(TokenType.USING): -475 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) -476 elif self._match_text_seq("WITH", "PARSER"): -477 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) -478 elif self._match(TokenType.COMMENT): -479 opt = exp.IndexConstraintOption(comment=self._parse_string()) -480 elif self._match_text_seq("VISIBLE"): -481 opt = exp.IndexConstraintOption(visible=True) -482 elif self._match_text_seq("INVISIBLE"): -483 opt = exp.IndexConstraintOption(visible=False) -484 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -485 self._match(TokenType.EQ) -486 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -487 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -488 self._match(TokenType.EQ) -489 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -490 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): -491 self._match(TokenType.EQ) -492 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) -493 else: -494 opt = None -495 -496 if not opt: -497 break -498 -499 options.append(opt) -500 -501 return self.expression( -502 exp.IndexColumnConstraint, -503 this=this, -504 schema=schema, -505 kind=kind, -506 index_type=index_type, -507 options=options, -508 ) -509 -510 def _parse_show_mysql( -511 self, -512 this: str, -513 target: bool | str = False, -514 full: t.Optional[bool] = None, -515 global_: t.Optional[bool] = None, -516 ) -> exp.Show: -517 if target: -518 if isinstance(target, str): -519 self._match_text_seq(target) -520 target_id = self._parse_id_var() -521 else: -522 target_id = None -523 -524 log = self._parse_string() if self._match_text_seq("IN") else None -525 -526 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): -527 position = self._parse_number() if self._match_text_seq("FROM") else None -528 db = None -529 else: -530 position = None -531 db = None -532 -533 if self._match(TokenType.FROM): -534 db = self._parse_id_var() -535 elif self._match(TokenType.DOT): -536 db = target_id -537 target_id = self._parse_id_var() -538 -539 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None -540 -541 like = self._parse_string() if self._match_text_seq("LIKE") else None -542 where = self._parse_where() -543 -544 if this == "PROFILE": -545 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) -546 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None -547 offset = self._parse_number() if self._match_text_seq("OFFSET") else None -548 limit = 
self._parse_number() if self._match_text_seq("LIMIT") else None -549 else: -550 types, query = None, None -551 offset, limit = self._parse_oldstyle_limit() -552 -553 mutex = True if self._match_text_seq("MUTEX") else None -554 mutex = False if self._match_text_seq("STATUS") else mutex -555 -556 return self.expression( -557 exp.Show, -558 this=this, -559 target=target_id, -560 full=full, -561 log=log, -562 position=position, -563 db=db, -564 channel=channel, -565 like=like, -566 where=where, -567 types=types, -568 query=query, -569 offset=offset, -570 limit=limit, -571 mutex=mutex, -572 **{"global": global_}, # type: ignore -573 ) -574 -575 def _parse_oldstyle_limit( -576 self, -577 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: -578 limit = None -579 offset = None -580 if self._match_text_seq("LIMIT"): -581 parts = self._parse_csv(self._parse_number) -582 if len(parts) == 1: -583 limit = parts[0] -584 elif len(parts) == 2: -585 limit = parts[1] -586 offset = parts[0] -587 -588 return offset, limit -589 -590 def _parse_set_item_charset(self, kind: str) -> exp.Expression: -591 this = self._parse_string() or self._parse_id_var() -592 return self.expression(exp.SetItem, this=this, kind=kind) -593 -594 def _parse_set_item_names(self) -> exp.Expression: -595 charset = self._parse_string() or self._parse_id_var() -596 if self._match_text_seq("COLLATE"): -597 collate = self._parse_string() or self._parse_id_var() -598 else: -599 collate = None -600 -601 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") -602 -603 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: -604 # mysql binary is special and can work anywhere, even in order by operations -605 # it operates like a no paren func -606 if self._match(TokenType.BINARY, advance=False): -607 data_type = self._parse_types(check_func=True, allow_identifiers=False) -608 -609 if isinstance(data_type, exp.DataType): -610 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) -611 -612 return super()._parse_type(parse_interval=parse_interval) -613 -614 def _parse_chr(self) -> t.Optional[exp.Expression]: -615 expressions = self._parse_csv(self._parse_conjunction) -616 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} -617 -618 if len(expressions) > 1: -619 kwargs["expressions"] = expressions[1:] -620 -621 if self._match(TokenType.USING): -622 kwargs["charset"] = self._parse_var() -623 -624 return self.expression(exp.Chr, **kwargs) -625 -626 class Generator(generator.Generator): -627 LOCKING_READS_SUPPORTED = True -628 NULL_ORDERING_SUPPORTED = None -629 JOIN_HINTS = False -630 TABLE_HINTS = True -631 DUPLICATE_KEY_UPDATE_WITH_SET = False -632 QUERY_HINT_SEP = " " -633 VALUES_AS_TABLE = False -634 NVL2_SUPPORTED = False -635 LAST_DAY_SUPPORTS_DATE_PART = False -636 JSON_TYPE_REQUIRED_FOR_EXTRACTION = True -637 JSON_PATH_BRACKETED_KEY_SUPPORTED = False -638 JSON_KEY_VALUE_PAIR_SEP = "," -639 -640 TRANSFORMS = { -641 **generator.Generator.TRANSFORMS, -642 exp.CurrentDate: no_paren_current_date_sql, -643 exp.DateDiff: _remove_ts_or_ds_to_date( -644 lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression") -645 ), -646 exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")), -647 exp.DateStrToDate: datestrtodate_sql, -648 exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")), -649 exp.DateTrunc: _date_trunc_sql, -650 exp.Day: _remove_ts_or_ds_to_date(), -651 exp.DayOfMonth: 
_remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")), -652 exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")), -653 exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")), -654 exp.GroupConcat: lambda self, -655 e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", -656 exp.ILike: no_ilike_sql, -657 exp.JSONExtractScalar: arrow_json_extract_sql, -658 exp.Max: max_or_greatest, -659 exp.Min: min_or_least, -660 exp.Month: _remove_ts_or_ds_to_date(), -661 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), -662 exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}", -663 exp.ParseJSON: lambda self, e: self.sql(e, "this"), -664 exp.Pivot: no_pivot_sql, -665 exp.Select: transforms.preprocess( -666 [ -667 transforms.eliminate_distinct_on, -668 transforms.eliminate_semi_and_anti_joins, -669 transforms.eliminate_qualify, -670 transforms.eliminate_full_outer_join, -671 ] -672 ), -673 exp.StrPosition: strposition_to_locate_sql, -674 exp.StrToDate: _str_to_date_sql, -675 exp.StrToTime: _str_to_date_sql, -676 exp.Stuff: rename_func("INSERT"), -677 exp.TableSample: no_tablesample_sql, -678 exp.TimeFromParts: rename_func("MAKETIME"), -679 exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"), -680 exp.TimestampDiff: lambda self, e: self.func( -681 "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this -682 ), -683 exp.TimestampSub: date_add_interval_sql("DATE", "SUB"), -684 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), -685 exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)), -686 exp.TimeToStr: _remove_ts_or_ds_to_date( -687 lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)) -688 ), -689 exp.Trim: _trim_sql, -690 exp.TryCast: no_trycast_sql, -691 exp.TsOrDsAdd: _date_add_sql("ADD"), -692 exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), -693 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, -694 exp.Week: _remove_ts_or_ds_to_date(), -695 exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")), -696 exp.Year: _remove_ts_or_ds_to_date(), -697 } -698 -699 UNSIGNED_TYPE_MAPPING = { -700 exp.DataType.Type.UBIGINT: "BIGINT", -701 exp.DataType.Type.UINT: "INT", -702 exp.DataType.Type.UMEDIUMINT: "MEDIUMINT", -703 exp.DataType.Type.USMALLINT: "SMALLINT", -704 exp.DataType.Type.UTINYINT: "TINYINT", -705 exp.DataType.Type.UDECIMAL: "DECIMAL", -706 } -707 -708 TIMESTAMP_TYPE_MAPPING = { -709 exp.DataType.Type.TIMESTAMP: "DATETIME", -710 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -711 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", -712 } -713 -714 TYPE_MAPPING = { -715 **generator.Generator.TYPE_MAPPING, -716 **UNSIGNED_TYPE_MAPPING, -717 **TIMESTAMP_TYPE_MAPPING, -718 } -719 -720 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) -721 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) -722 TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT) -723 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) -724 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) -725 TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB) -726 -727 PROPERTIES_LOCATION = { -728 **generator.Generator.PROPERTIES_LOCATION, -729 exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, -730 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -731 } -732 -733 LIMIT_FETCH = "LIMIT" -734 -735 LIMIT_ONLY_LITERALS = True -736 -737 # MySQL doesn't support many datatypes in cast. 
-738 # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast -739 CAST_MAPPING = { -740 exp.DataType.Type.BIGINT: "SIGNED", -741 exp.DataType.Type.BOOLEAN: "SIGNED", -742 exp.DataType.Type.INT: "SIGNED", -743 exp.DataType.Type.TEXT: "CHAR", -744 exp.DataType.Type.UBIGINT: "UNSIGNED", -745 exp.DataType.Type.VARCHAR: "CHAR", -746 } -747 -748 TIMESTAMP_FUNC_TYPES = { -749 exp.DataType.Type.TIMESTAMPTZ, -750 exp.DataType.Type.TIMESTAMPLTZ, -751 } -752 -753 def datatype_sql(self, expression: exp.DataType) -> str: -754 # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html -755 result = super().datatype_sql(expression) -756 if expression.this in self.UNSIGNED_TYPE_MAPPING: -757 result = f"{result} UNSIGNED" -758 return result -759 -760 def xor_sql(self, expression: exp.Xor) -> str: -761 if expression.expressions: -762 return self.expressions(expression, sep=" XOR ") -763 return super().xor_sql(expression) -764 -765 def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str: -766 return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})" -767 -768 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: -769 if expression.to.this in self.TIMESTAMP_FUNC_TYPES: -770 return self.func("TIMESTAMP", expression.this) -771 -772 to = self.CAST_MAPPING.get(expression.to.this) -773 -774 if to: -775 expression.to.set("this", to) -776 return super().cast_sql(expression) -777 -778 def show_sql(self, expression: exp.Show) -> str: -779 this = f" {expression.name}" -780 full = " FULL" if expression.args.get("full") else "" -781 global_ = " GLOBAL" if expression.args.get("global") else "" -782 -783 target = self.sql(expression, "target") -784 target = f" {target}" if target else "" -785 if expression.name in ("COLUMNS", "INDEX"): -786 target = f" FROM{target}" -787 elif expression.name == "GRANTS": -788 target = f" FOR{target}" -789 -790 db = self._prefixed_sql("FROM", expression, "db") -791 -792 like = self._prefixed_sql("LIKE", expression, "like") -793 where = self.sql(expression, "where") -794 -795 types = self.expressions(expression, key="types") -796 types = f" {types}" if types else types -797 query = self._prefixed_sql("FOR QUERY", expression, "query") -798 -799 if expression.name == "PROFILE": -800 offset = self._prefixed_sql("OFFSET", expression, "offset") -801 limit = self._prefixed_sql("LIMIT", expression, "limit") -802 else: -803 offset = "" -804 limit = self._oldstyle_limit_sql(expression) -805 -806 log = self._prefixed_sql("IN", expression, "log") -807 position = self._prefixed_sql("FROM", expression, "position") -808 -809 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") -810 -811 if expression.name == "ENGINE": -812 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" -813 else: -814 mutex_or_status = "" -815 -816 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" -817 -818 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: -819 dtype = self.sql(expression, "dtype") -820 if not dtype: -821 return super().altercolumn_sql(expression) -822 -823 this = self.sql(expression, "this") -824 return f"MODIFY COLUMN {this} {dtype}" -825 -826 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: -827 sql = self.sql(expression, arg) -828 return f" {prefix} {sql}" if sql else "" -829 -830 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: -831 
limit = self.sql(expression, "limit") -832 offset = self.sql(expression, "offset") -833 if limit: -834 limit_offset = f"{offset}, {limit}" if offset else limit -835 return f" LIMIT {limit_offset}" -836 return "" -837 -838 def chr_sql(self, expression: exp.Chr) -> str: -839 this = self.expressions(sqls=[expression.this] + expression.expressions) -840 charset = expression.args.get("charset") -841 using = f" USING {self.sql(charset)}" if charset else "" -842 return f"CHAR({this}{using})" +449 VALUES_FOLLOWED_BY_PAREN = False +450 +451 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: +452 this = self._parse_id_var() +453 if not self._match(TokenType.L_PAREN): +454 return this +455 +456 expression = self._parse_number() +457 self._match_r_paren() +458 return self.expression(exp.ColumnPrefix, this=this, expression=expression) +459 +460 def _parse_index_constraint( +461 self, kind: t.Optional[str] = None +462 ) -> exp.IndexColumnConstraint: +463 if kind: +464 self._match_texts(("INDEX", "KEY")) +465 +466 this = self._parse_id_var(any_token=False) +467 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text +468 schema = self._parse_schema() +469 +470 options = [] +471 while True: +472 if self._match_text_seq("KEY_BLOCK_SIZE"): +473 self._match(TokenType.EQ) +474 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) +475 elif self._match(TokenType.USING): +476 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) +477 elif self._match_text_seq("WITH", "PARSER"): +478 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) +479 elif self._match(TokenType.COMMENT): +480 opt = exp.IndexConstraintOption(comment=self._parse_string()) +481 elif self._match_text_seq("VISIBLE"): +482 opt = exp.IndexConstraintOption(visible=True) +483 elif self._match_text_seq("INVISIBLE"): +484 opt = exp.IndexConstraintOption(visible=False) +485 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +486 self._match(TokenType.EQ) +487 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +488 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +489 self._match(TokenType.EQ) +490 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +491 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): +492 self._match(TokenType.EQ) +493 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) +494 else: +495 opt = None +496 +497 if not opt: +498 break +499 +500 options.append(opt) +501 +502 return self.expression( +503 exp.IndexColumnConstraint, +504 this=this, +505 schema=schema, +506 kind=kind, +507 index_type=index_type, +508 options=options, +509 ) +510 +511 def _parse_show_mysql( +512 self, +513 this: str, +514 target: bool | str = False, +515 full: t.Optional[bool] = None, +516 global_: t.Optional[bool] = None, +517 ) -> exp.Show: +518 if target: +519 if isinstance(target, str): +520 self._match_text_seq(target) +521 target_id = self._parse_id_var() +522 else: +523 target_id = None +524 +525 log = self._parse_string() if self._match_text_seq("IN") else None +526 +527 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): +528 position = self._parse_number() if self._match_text_seq("FROM") else None +529 db = None +530 else: +531 position = None +532 db = None +533 +534 if self._match(TokenType.FROM): +535 db = self._parse_id_var() +536 elif self._match(TokenType.DOT): +537 db = target_id +538 target_id = self._parse_id_var() +539 +540 channel = self._parse_id_var() if self._match_text_seq("FOR", 
"CHANNEL") else None +541 +542 like = self._parse_string() if self._match_text_seq("LIKE") else None +543 where = self._parse_where() +544 +545 if this == "PROFILE": +546 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) +547 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None +548 offset = self._parse_number() if self._match_text_seq("OFFSET") else None +549 limit = self._parse_number() if self._match_text_seq("LIMIT") else None +550 else: +551 types, query = None, None +552 offset, limit = self._parse_oldstyle_limit() +553 +554 mutex = True if self._match_text_seq("MUTEX") else None +555 mutex = False if self._match_text_seq("STATUS") else mutex +556 +557 return self.expression( +558 exp.Show, +559 this=this, +560 target=target_id, +561 full=full, +562 log=log, +563 position=position, +564 db=db, +565 channel=channel, +566 like=like, +567 where=where, +568 types=types, +569 query=query, +570 offset=offset, +571 limit=limit, +572 mutex=mutex, +573 **{"global": global_}, # type: ignore +574 ) +575 +576 def _parse_oldstyle_limit( +577 self, +578 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: +579 limit = None +580 offset = None +581 if self._match_text_seq("LIMIT"): +582 parts = self._parse_csv(self._parse_number) +583 if len(parts) == 1: +584 limit = parts[0] +585 elif len(parts) == 2: +586 limit = parts[1] +587 offset = parts[0] +588 +589 return offset, limit +590 +591 def _parse_set_item_charset(self, kind: str) -> exp.Expression: +592 this = self._parse_string() or self._parse_id_var() +593 return self.expression(exp.SetItem, this=this, kind=kind) +594 +595 def _parse_set_item_names(self) -> exp.Expression: +596 charset = self._parse_string() or self._parse_id_var() +597 if self._match_text_seq("COLLATE"): +598 collate = self._parse_string() or self._parse_id_var() +599 else: +600 collate = None +601 +602 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") +603 +604 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: +605 # mysql binary is special and can work anywhere, even in order by operations +606 # it operates like a no paren func +607 if self._match(TokenType.BINARY, advance=False): +608 data_type = self._parse_types(check_func=True, allow_identifiers=False) +609 +610 if isinstance(data_type, exp.DataType): +611 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) +612 +613 return super()._parse_type(parse_interval=parse_interval) +614 +615 def _parse_chr(self) -> t.Optional[exp.Expression]: +616 expressions = self._parse_csv(self._parse_conjunction) +617 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} +618 +619 if len(expressions) > 1: +620 kwargs["expressions"] = expressions[1:] +621 +622 if self._match(TokenType.USING): +623 kwargs["charset"] = self._parse_var() +624 +625 return self.expression(exp.Chr, **kwargs) +626 +627 class Generator(generator.Generator): +628 LOCKING_READS_SUPPORTED = True +629 NULL_ORDERING_SUPPORTED = None +630 JOIN_HINTS = False +631 TABLE_HINTS = True +632 DUPLICATE_KEY_UPDATE_WITH_SET = False +633 QUERY_HINT_SEP = " " +634 VALUES_AS_TABLE = False +635 NVL2_SUPPORTED = False +636 LAST_DAY_SUPPORTS_DATE_PART = False +637 JSON_TYPE_REQUIRED_FOR_EXTRACTION = True +638 JSON_PATH_BRACKETED_KEY_SUPPORTED = False +639 JSON_KEY_VALUE_PAIR_SEP = "," +640 +641 TRANSFORMS = { +642 **generator.Generator.TRANSFORMS, +643 exp.CurrentDate: no_paren_current_date_sql, +644 exp.DateDiff: 
_remove_ts_or_ds_to_date( +645 lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression") +646 ), +647 exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")), +648 exp.DateStrToDate: datestrtodate_sql, +649 exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")), +650 exp.DateTrunc: _date_trunc_sql, +651 exp.Day: _remove_ts_or_ds_to_date(), +652 exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")), +653 exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")), +654 exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")), +655 exp.GroupConcat: lambda self, +656 e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""", +657 exp.ILike: no_ilike_sql, +658 exp.JSONExtractScalar: arrow_json_extract_sql, +659 exp.Max: max_or_greatest, +660 exp.Min: min_or_least, +661 exp.Month: _remove_ts_or_ds_to_date(), +662 exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"), +663 exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}", +664 exp.ParseJSON: lambda self, e: self.sql(e, "this"), +665 exp.Pivot: no_pivot_sql, +666 exp.Select: transforms.preprocess( +667 [ +668 transforms.eliminate_distinct_on, +669 transforms.eliminate_semi_and_anti_joins, +670 transforms.eliminate_qualify, +671 transforms.eliminate_full_outer_join, +672 ] +673 ), +674 exp.StrPosition: strposition_to_locate_sql, +675 exp.StrToDate: _str_to_date_sql, +676 exp.StrToTime: _str_to_date_sql, +677 exp.Stuff: rename_func("INSERT"), +678 exp.TableSample: no_tablesample_sql, +679 exp.TimeFromParts: rename_func("MAKETIME"), +680 exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"), +681 exp.TimestampDiff: lambda self, e: self.func( +682 "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this +683 ), +684 exp.TimestampSub: date_add_interval_sql("DATE", "SUB"), +685 exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"), +686 exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)), +687 exp.TimeToStr: _remove_ts_or_ds_to_date( +688 lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e)) +689 ), +690 exp.Trim: _trim_sql, +691 exp.TryCast: no_trycast_sql, +692 exp.TsOrDsAdd: _date_add_sql("ADD"), +693 exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression), +694 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, +695 exp.Week: _remove_ts_or_ds_to_date(), +696 exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")), +697 exp.Year: _remove_ts_or_ds_to_date(), +698 } +699 +700 UNSIGNED_TYPE_MAPPING = { +701 exp.DataType.Type.UBIGINT: "BIGINT", +702 exp.DataType.Type.UINT: "INT", +703 exp.DataType.Type.UMEDIUMINT: "MEDIUMINT", +704 exp.DataType.Type.USMALLINT: "SMALLINT", +705 exp.DataType.Type.UTINYINT: "TINYINT", +706 exp.DataType.Type.UDECIMAL: "DECIMAL", +707 } +708 +709 TIMESTAMP_TYPE_MAPPING = { +710 exp.DataType.Type.TIMESTAMP: "DATETIME", +711 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +712 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", +713 } +714 +715 TYPE_MAPPING = { +716 **generator.Generator.TYPE_MAPPING, +717 **UNSIGNED_TYPE_MAPPING, +718 **TIMESTAMP_TYPE_MAPPING, +719 } +720 +721 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT) +722 TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT) +723 TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT) +724 TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB) +725 TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB) +726 TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB) +727 +728 PROPERTIES_LOCATION = { +729 **generator.Generator.PROPERTIES_LOCATION, +730 
exp.TransientProperty: exp.Properties.Location.UNSUPPORTED, +731 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +732 } +733 +734 LIMIT_FETCH = "LIMIT" +735 +736 LIMIT_ONLY_LITERALS = True +737 +738 # MySQL doesn't support many datatypes in cast. +739 # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast +740 CAST_MAPPING = { +741 exp.DataType.Type.BIGINT: "SIGNED", +742 exp.DataType.Type.BOOLEAN: "SIGNED", +743 exp.DataType.Type.INT: "SIGNED", +744 exp.DataType.Type.TEXT: "CHAR", +745 exp.DataType.Type.UBIGINT: "UNSIGNED", +746 exp.DataType.Type.VARCHAR: "CHAR", +747 } +748 +749 TIMESTAMP_FUNC_TYPES = { +750 exp.DataType.Type.TIMESTAMPTZ, +751 exp.DataType.Type.TIMESTAMPLTZ, +752 } +753 +754 def datatype_sql(self, expression: exp.DataType) -> str: +755 # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html +756 result = super().datatype_sql(expression) +757 if expression.this in self.UNSIGNED_TYPE_MAPPING: +758 result = f"{result} UNSIGNED" +759 return result +760 +761 def xor_sql(self, expression: exp.Xor) -> str: +762 if expression.expressions: +763 return self.expressions(expression, sep=" XOR ") +764 return super().xor_sql(expression) +765 +766 def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str: +767 return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})" +768 +769 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: +770 if expression.to.this in self.TIMESTAMP_FUNC_TYPES: +771 return self.func("TIMESTAMP", expression.this) +772 +773 to = self.CAST_MAPPING.get(expression.to.this) +774 +775 if to: +776 expression.to.set("this", to) +777 return super().cast_sql(expression) +778 +779 def show_sql(self, expression: exp.Show) -> str: +780 this = f" {expression.name}" +781 full = " FULL" if expression.args.get("full") else "" +782 global_ = " GLOBAL" if expression.args.get("global") else "" +783 +784 target = self.sql(expression, "target") +785 target = f" {target}" if target else "" +786 if expression.name in ("COLUMNS", "INDEX"): +787 target = f" FROM{target}" +788 elif expression.name == "GRANTS": +789 target = f" FOR{target}" +790 +791 db = self._prefixed_sql("FROM", expression, "db") +792 +793 like = self._prefixed_sql("LIKE", expression, "like") +794 where = self.sql(expression, "where") +795 +796 types = self.expressions(expression, key="types") +797 types = f" {types}" if types else types +798 query = self._prefixed_sql("FOR QUERY", expression, "query") +799 +800 if expression.name == "PROFILE": +801 offset = self._prefixed_sql("OFFSET", expression, "offset") +802 limit = self._prefixed_sql("LIMIT", expression, "limit") +803 else: +804 offset = "" +805 limit = self._oldstyle_limit_sql(expression) +806 +807 log = self._prefixed_sql("IN", expression, "log") +808 position = self._prefixed_sql("FROM", expression, "position") +809 +810 channel = self._prefixed_sql("FOR CHANNEL", expression, "channel") +811 +812 if expression.name == "ENGINE": +813 mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS" +814 else: +815 mutex_or_status = "" +816 +817 return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}" +818 +819 def altercolumn_sql(self, expression: exp.AlterColumn) -> str: +820 dtype = self.sql(expression, "dtype") +821 if not dtype: +822 return super().altercolumn_sql(expression) +823 +824 this = self.sql(expression, "this") +825 return f"MODIFY COLUMN {this} 
{dtype}" +826 +827 def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str: +828 sql = self.sql(expression, arg) +829 return f" {prefix} {sql}" if sql else "" +830 +831 def _oldstyle_limit_sql(self, expression: exp.Show) -> str: +832 limit = self.sql(expression, "limit") +833 offset = self.sql(expression, "offset") +834 if limit: +835 limit_offset = f"{offset}, {limit}" if offset else limit +836 return f" LIMIT {limit_offset}" +837 return "" +838 +839 def chr_sql(self, expression: exp.Chr) -> str: +840 this = self.expressions(sqls=[expression.this] + expression.expressions) +841 charset = expression.args.get("charset") +842 using = f" USING {self.sql(charset)}" if charset else "" +843 return f"CHAR({this}{using})"
    @@ -2473,7 +2478,7 @@
    COMMANDS = -{<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>} +{<TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>}
    @@ -2710,182 +2715,183 @@ 446 447 LOG_DEFAULTS_TO_LN = True 448 STRING_ALIASES = True -449 -450 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: -451 this = self._parse_id_var() -452 if not self._match(TokenType.L_PAREN): -453 return this -454 -455 expression = self._parse_number() -456 self._match_r_paren() -457 return self.expression(exp.ColumnPrefix, this=this, expression=expression) -458 -459 def _parse_index_constraint( -460 self, kind: t.Optional[str] = None -461 ) -> exp.IndexColumnConstraint: -462 if kind: -463 self._match_texts(("INDEX", "KEY")) -464 -465 this = self._parse_id_var(any_token=False) -466 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text -467 schema = self._parse_schema() -468 -469 options = [] -470 while True: -471 if self._match_text_seq("KEY_BLOCK_SIZE"): -472 self._match(TokenType.EQ) -473 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) -474 elif self._match(TokenType.USING): -475 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) -476 elif self._match_text_seq("WITH", "PARSER"): -477 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) -478 elif self._match(TokenType.COMMENT): -479 opt = exp.IndexConstraintOption(comment=self._parse_string()) -480 elif self._match_text_seq("VISIBLE"): -481 opt = exp.IndexConstraintOption(visible=True) -482 elif self._match_text_seq("INVISIBLE"): -483 opt = exp.IndexConstraintOption(visible=False) -484 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -485 self._match(TokenType.EQ) -486 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -487 elif self._match_text_seq("ENGINE_ATTRIBUTE"): -488 self._match(TokenType.EQ) -489 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) -490 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): -491 self._match(TokenType.EQ) -492 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) -493 else: -494 opt = None -495 -496 if not opt: -497 break -498 -499 options.append(opt) -500 -501 return self.expression( -502 exp.IndexColumnConstraint, -503 this=this, -504 schema=schema, -505 kind=kind, -506 index_type=index_type, -507 options=options, -508 ) -509 -510 def _parse_show_mysql( -511 self, -512 this: str, -513 target: bool | str = False, -514 full: t.Optional[bool] = None, -515 global_: t.Optional[bool] = None, -516 ) -> exp.Show: -517 if target: -518 if isinstance(target, str): -519 self._match_text_seq(target) -520 target_id = self._parse_id_var() -521 else: -522 target_id = None -523 -524 log = self._parse_string() if self._match_text_seq("IN") else None -525 -526 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): -527 position = self._parse_number() if self._match_text_seq("FROM") else None -528 db = None -529 else: -530 position = None -531 db = None -532 -533 if self._match(TokenType.FROM): -534 db = self._parse_id_var() -535 elif self._match(TokenType.DOT): -536 db = target_id -537 target_id = self._parse_id_var() -538 -539 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None -540 -541 like = self._parse_string() if self._match_text_seq("LIKE") else None -542 where = self._parse_where() -543 -544 if this == "PROFILE": -545 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) -546 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None -547 offset = self._parse_number() if self._match_text_seq("OFFSET") else None -548 limit = 
self._parse_number() if self._match_text_seq("LIMIT") else None -549 else: -550 types, query = None, None -551 offset, limit = self._parse_oldstyle_limit() -552 -553 mutex = True if self._match_text_seq("MUTEX") else None -554 mutex = False if self._match_text_seq("STATUS") else mutex -555 -556 return self.expression( -557 exp.Show, -558 this=this, -559 target=target_id, -560 full=full, -561 log=log, -562 position=position, -563 db=db, -564 channel=channel, -565 like=like, -566 where=where, -567 types=types, -568 query=query, -569 offset=offset, -570 limit=limit, -571 mutex=mutex, -572 **{"global": global_}, # type: ignore -573 ) -574 -575 def _parse_oldstyle_limit( -576 self, -577 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: -578 limit = None -579 offset = None -580 if self._match_text_seq("LIMIT"): -581 parts = self._parse_csv(self._parse_number) -582 if len(parts) == 1: -583 limit = parts[0] -584 elif len(parts) == 2: -585 limit = parts[1] -586 offset = parts[0] -587 -588 return offset, limit -589 -590 def _parse_set_item_charset(self, kind: str) -> exp.Expression: -591 this = self._parse_string() or self._parse_id_var() -592 return self.expression(exp.SetItem, this=this, kind=kind) -593 -594 def _parse_set_item_names(self) -> exp.Expression: -595 charset = self._parse_string() or self._parse_id_var() -596 if self._match_text_seq("COLLATE"): -597 collate = self._parse_string() or self._parse_id_var() -598 else: -599 collate = None -600 -601 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") -602 -603 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: -604 # mysql binary is special and can work anywhere, even in order by operations -605 # it operates like a no paren func -606 if self._match(TokenType.BINARY, advance=False): -607 data_type = self._parse_types(check_func=True, allow_identifiers=False) -608 -609 if isinstance(data_type, exp.DataType): -610 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) -611 -612 return super()._parse_type(parse_interval=parse_interval) -613 -614 def _parse_chr(self) -> t.Optional[exp.Expression]: -615 expressions = self._parse_csv(self._parse_conjunction) -616 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} -617 -618 if len(expressions) > 1: -619 kwargs["expressions"] = expressions[1:] -620 -621 if self._match(TokenType.USING): -622 kwargs["charset"] = self._parse_var() -623 -624 return self.expression(exp.Chr, **kwargs) +449 VALUES_FOLLOWED_BY_PAREN = False +450 +451 def _parse_primary_key_part(self) -> t.Optional[exp.Expression]: +452 this = self._parse_id_var() +453 if not self._match(TokenType.L_PAREN): +454 return this +455 +456 expression = self._parse_number() +457 self._match_r_paren() +458 return self.expression(exp.ColumnPrefix, this=this, expression=expression) +459 +460 def _parse_index_constraint( +461 self, kind: t.Optional[str] = None +462 ) -> exp.IndexColumnConstraint: +463 if kind: +464 self._match_texts(("INDEX", "KEY")) +465 +466 this = self._parse_id_var(any_token=False) +467 index_type = self._match(TokenType.USING) and self._advance_any() and self._prev.text +468 schema = self._parse_schema() +469 +470 options = [] +471 while True: +472 if self._match_text_seq("KEY_BLOCK_SIZE"): +473 self._match(TokenType.EQ) +474 opt = exp.IndexConstraintOption(key_block_size=self._parse_number()) +475 elif self._match(TokenType.USING): +476 opt = exp.IndexConstraintOption(using=self._advance_any() and self._prev.text) +477 
elif self._match_text_seq("WITH", "PARSER"): +478 opt = exp.IndexConstraintOption(parser=self._parse_var(any_token=True)) +479 elif self._match(TokenType.COMMENT): +480 opt = exp.IndexConstraintOption(comment=self._parse_string()) +481 elif self._match_text_seq("VISIBLE"): +482 opt = exp.IndexConstraintOption(visible=True) +483 elif self._match_text_seq("INVISIBLE"): +484 opt = exp.IndexConstraintOption(visible=False) +485 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +486 self._match(TokenType.EQ) +487 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +488 elif self._match_text_seq("ENGINE_ATTRIBUTE"): +489 self._match(TokenType.EQ) +490 opt = exp.IndexConstraintOption(engine_attr=self._parse_string()) +491 elif self._match_text_seq("SECONDARY_ENGINE_ATTRIBUTE"): +492 self._match(TokenType.EQ) +493 opt = exp.IndexConstraintOption(secondary_engine_attr=self._parse_string()) +494 else: +495 opt = None +496 +497 if not opt: +498 break +499 +500 options.append(opt) +501 +502 return self.expression( +503 exp.IndexColumnConstraint, +504 this=this, +505 schema=schema, +506 kind=kind, +507 index_type=index_type, +508 options=options, +509 ) +510 +511 def _parse_show_mysql( +512 self, +513 this: str, +514 target: bool | str = False, +515 full: t.Optional[bool] = None, +516 global_: t.Optional[bool] = None, +517 ) -> exp.Show: +518 if target: +519 if isinstance(target, str): +520 self._match_text_seq(target) +521 target_id = self._parse_id_var() +522 else: +523 target_id = None +524 +525 log = self._parse_string() if self._match_text_seq("IN") else None +526 +527 if this in ("BINLOG EVENTS", "RELAYLOG EVENTS"): +528 position = self._parse_number() if self._match_text_seq("FROM") else None +529 db = None +530 else: +531 position = None +532 db = None +533 +534 if self._match(TokenType.FROM): +535 db = self._parse_id_var() +536 elif self._match(TokenType.DOT): +537 db = target_id +538 target_id = self._parse_id_var() +539 +540 channel = self._parse_id_var() if self._match_text_seq("FOR", "CHANNEL") else None +541 +542 like = self._parse_string() if self._match_text_seq("LIKE") else None +543 where = self._parse_where() +544 +545 if this == "PROFILE": +546 types = self._parse_csv(lambda: self._parse_var_from_options(self.PROFILE_TYPES)) +547 query = self._parse_number() if self._match_text_seq("FOR", "QUERY") else None +548 offset = self._parse_number() if self._match_text_seq("OFFSET") else None +549 limit = self._parse_number() if self._match_text_seq("LIMIT") else None +550 else: +551 types, query = None, None +552 offset, limit = self._parse_oldstyle_limit() +553 +554 mutex = True if self._match_text_seq("MUTEX") else None +555 mutex = False if self._match_text_seq("STATUS") else mutex +556 +557 return self.expression( +558 exp.Show, +559 this=this, +560 target=target_id, +561 full=full, +562 log=log, +563 position=position, +564 db=db, +565 channel=channel, +566 like=like, +567 where=where, +568 types=types, +569 query=query, +570 offset=offset, +571 limit=limit, +572 mutex=mutex, +573 **{"global": global_}, # type: ignore +574 ) +575 +576 def _parse_oldstyle_limit( +577 self, +578 ) -> t.Tuple[t.Optional[exp.Expression], t.Optional[exp.Expression]]: +579 limit = None +580 offset = None +581 if self._match_text_seq("LIMIT"): +582 parts = self._parse_csv(self._parse_number) +583 if len(parts) == 1: +584 limit = parts[0] +585 elif len(parts) == 2: +586 limit = parts[1] +587 offset = parts[0] +588 +589 return offset, limit +590 +591 def _parse_set_item_charset(self, kind: str) -> 
exp.Expression: +592 this = self._parse_string() or self._parse_id_var() +593 return self.expression(exp.SetItem, this=this, kind=kind) +594 +595 def _parse_set_item_names(self) -> exp.Expression: +596 charset = self._parse_string() or self._parse_id_var() +597 if self._match_text_seq("COLLATE"): +598 collate = self._parse_string() or self._parse_id_var() +599 else: +600 collate = None +601 +602 return self.expression(exp.SetItem, this=charset, collate=collate, kind="NAMES") +603 +604 def _parse_type(self, parse_interval: bool = True) -> t.Optional[exp.Expression]: +605 # mysql binary is special and can work anywhere, even in order by operations +606 # it operates like a no paren func +607 if self._match(TokenType.BINARY, advance=False): +608 data_type = self._parse_types(check_func=True, allow_identifiers=False) +609 +610 if isinstance(data_type, exp.DataType): +611 return self.expression(exp.Cast, this=self._parse_column(), to=data_type) +612 +613 return super()._parse_type(parse_interval=parse_interval) +614 +615 def _parse_chr(self) -> t.Optional[exp.Expression]: +616 expressions = self._parse_csv(self._parse_conjunction) +617 kwargs: t.Dict[str, t.Any] = {"this": seq_get(expressions, 0)} +618 +619 if len(expressions) > 1: +620 kwargs["expressions"] = expressions[1:] +621 +622 if self._match(TokenType.USING): +623 kwargs["charset"] = self._parse_var() +624 +625 return self.expression(exp.Chr, **kwargs)
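The parser methods in this listing can be exercised directly; below is a small, hedged sketch (outputs are illustrative and depend on the installed sqlglot version).

import sqlglot
from sqlglot import exp

# _parse_type: a bare BINARY prefix behaves like a parenthesis-free cast, so the
# projection below is expected to come back as an exp.Cast node rather than a column.
node = sqlglot.parse_one("SELECT BINARY col FROM t", read="mysql").selects[0]
print(type(node).__name__)  # typically: Cast

# _parse_show_mysql + _parse_oldstyle_limit: SHOW statements keep MySQL's
# old-style "LIMIT offset, row_count" form on a round trip.
print(sqlglot.transpile("SHOW BINLOG EVENTS LIMIT 2, 5", read="mysql", write="mysql")[0])
# typically: SHOW BINLOG EVENTS LIMIT 2, 5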
    @@ -2910,7 +2916,7 @@ Default: 3
    FUNC_TOKENS = - {<TokenType.RLIKE: 'RLIKE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.ILIKE: 'ILIKE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VALUES: 'VALUES'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.XOR: 'XOR'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ANY: 'ANY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.RANGE: 'RANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.NULL: 'NULL'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.LIKE: 'LIKE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LEFT: 'LEFT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATABASE: 
'DATABASE'>, <TokenType.DATE: 'DATE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.INSERT: 'INSERT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.GLOB: 'GLOB'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.MERGE: 'MERGE'>, <TokenType.XML: 'XML'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TIME: 'TIME'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.RLIKE: 'RLIKE'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ENUM: 'ENUM'>, <TokenType.VALUES: 'VALUES'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.LIKE: 'LIKE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.XOR: 'XOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ILIKE: 'ILIKE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ALL: 'ALL'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, 
<TokenType.BPCHAR: 'BPCHAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INSERT: 'INSERT'>, <TokenType.GLOB: 'GLOB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.INT128: 'INT128'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>}
    @@ -2936,7 +2942,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, 
<TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TRUE: 'TRUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -3040,7 +3046,7 @@ Default: 3
    SCHEMA_UNNAMED_CONSTRAINTS = - {'SPATIAL', 'LIKE', 'FULLTEXT', 'UNIQUE', 'KEY', 'FOREIGN KEY', 'PERIOD', 'INDEX', 'CHECK', 'PRIMARY KEY'} + {'FOREIGN KEY', 'INDEX', 'FULLTEXT', 'PERIOD', 'UNIQUE', 'KEY', 'SPATIAL', 'CHECK', 'LIKE', 'PRIMARY KEY'}
    @@ -3052,7 +3058,7 @@ Default: 3
    PROFILE_TYPES = -{'CPU', 'SOURCE', 'BLOCK IO', 'CONTEXT SWITCHES', 'IPC', 'PAGE FAULTS', 'MEMORY', 'SWAPS', 'ALL'} +{'IPC', 'BLOCK IO', 'ALL', 'CONTEXT SWITCHES', 'CPU', 'SOURCE', 'MEMORY', 'SWAPS', 'PAGE FAULTS'}
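PROFILE_TYPES feeds _parse_show_mysql when this == "PROFILE": each comma-separated entry after SHOW PROFILE is matched against these options. A hedged round-trip sketch (output illustrative):

import sqlglot
print(sqlglot.transpile("SHOW PROFILE CPU, MEMORY FOR QUERY 1", read="mysql", write="mysql")[0])
# typically: SHOW PROFILE CPU, MEMORY FOR QUERY 1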
    @@ -3065,7 +3071,7 @@ Default: 3
    TYPE_TOKENS = - {<TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DATE: 'DATE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UUID: 'UUID'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SET: 'SET'>, <TokenType.BIT: 'BIT'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.INT128: 'INT128'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NULL: 'NULL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.XML: 'XML'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, 
<TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TIME: 'TIME'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.INT256: 'INT256'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UINT: 'UINT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.JSON: 'JSON'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.SET: 'SET'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>}
    @@ -3078,7 +3084,7 @@ Default: 3
    ENUM_TYPE_TOKENS = - {<TokenType.ENUM16: 'ENUM16'>, <TokenType.SET: 'SET'>, <TokenType.ENUM: 'ENUM'>, <TokenType.ENUM8: 'ENUM8'>} + {<TokenType.ENUM16: 'ENUM16'>, <TokenType.SET: 'SET'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ENUM: 'ENUM'>}
    @@ -3110,6 +3116,18 @@ Default: 3 +
    +
    +
    + VALUES_FOLLOWED_BY_PAREN = +False + + +
    + + + +
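VALUES_FOLLOWED_BY_PAREN = False is a parser-level hint: in MySQL the VALUES keyword is not guaranteed to introduce a parenthesized row constructor, presumably because of constructs such as VALUES(col) inside ON DUPLICATE KEY UPDATE. A hedged sketch of the behaviour this flag relates to (output illustrative):

import sqlglot
sql = "INSERT INTO t (a) VALUES (1) ON DUPLICATE KEY UPDATE a = VALUES(a)"
print(sqlglot.transpile(sql, read="mysql", write="mysql")[0])
# typically round-trips unchanged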
    @@ -3241,223 +3259,223 @@ Default: 3
    -
    626    class Generator(generator.Generator):
    -627        LOCKING_READS_SUPPORTED = True
    -628        NULL_ORDERING_SUPPORTED = None
    -629        JOIN_HINTS = False
    -630        TABLE_HINTS = True
    -631        DUPLICATE_KEY_UPDATE_WITH_SET = False
    -632        QUERY_HINT_SEP = " "
    -633        VALUES_AS_TABLE = False
    -634        NVL2_SUPPORTED = False
    -635        LAST_DAY_SUPPORTS_DATE_PART = False
    -636        JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
    -637        JSON_PATH_BRACKETED_KEY_SUPPORTED = False
    -638        JSON_KEY_VALUE_PAIR_SEP = ","
    -639
    -640        TRANSFORMS = {
    -641            **generator.Generator.TRANSFORMS,
    -642            exp.CurrentDate: no_paren_current_date_sql,
    -643            exp.DateDiff: _remove_ts_or_ds_to_date(
    -644                lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression")
    -645            ),
    -646            exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")),
    -647            exp.DateStrToDate: datestrtodate_sql,
    -648            exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")),
    -649            exp.DateTrunc: _date_trunc_sql,
    -650            exp.Day: _remove_ts_or_ds_to_date(),
    -651            exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")),
    -652            exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")),
    -653            exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")),
    -654            exp.GroupConcat: lambda self,
    -655            e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    -656            exp.ILike: no_ilike_sql,
    -657            exp.JSONExtractScalar: arrow_json_extract_sql,
    -658            exp.Max: max_or_greatest,
    -659            exp.Min: min_or_least,
    -660            exp.Month: _remove_ts_or_ds_to_date(),
    -661            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    -662            exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}",
    -663            exp.ParseJSON: lambda self, e: self.sql(e, "this"),
    -664            exp.Pivot: no_pivot_sql,
    -665            exp.Select: transforms.preprocess(
    -666                [
    -667                    transforms.eliminate_distinct_on,
    -668                    transforms.eliminate_semi_and_anti_joins,
    -669                    transforms.eliminate_qualify,
    -670                    transforms.eliminate_full_outer_join,
    -671                ]
    -672            ),
    -673            exp.StrPosition: strposition_to_locate_sql,
    -674            exp.StrToDate: _str_to_date_sql,
    -675            exp.StrToTime: _str_to_date_sql,
    -676            exp.Stuff: rename_func("INSERT"),
    -677            exp.TableSample: no_tablesample_sql,
    -678            exp.TimeFromParts: rename_func("MAKETIME"),
    -679            exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"),
    -680            exp.TimestampDiff: lambda self, e: self.func(
    -681                "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this
    -682            ),
    -683            exp.TimestampSub: date_add_interval_sql("DATE", "SUB"),
    -684            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    -685            exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)),
    -686            exp.TimeToStr: _remove_ts_or_ds_to_date(
    -687                lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e))
    -688            ),
    -689            exp.Trim: _trim_sql,
    -690            exp.TryCast: no_trycast_sql,
    -691            exp.TsOrDsAdd: _date_add_sql("ADD"),
    -692            exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    -693            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    -694            exp.Week: _remove_ts_or_ds_to_date(),
    -695            exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")),
    -696            exp.Year: _remove_ts_or_ds_to_date(),
    -697        }
    -698
    -699        UNSIGNED_TYPE_MAPPING = {
    -700            exp.DataType.Type.UBIGINT: "BIGINT",
    -701            exp.DataType.Type.UINT: "INT",
    -702            exp.DataType.Type.UMEDIUMINT: "MEDIUMINT",
    -703            exp.DataType.Type.USMALLINT: "SMALLINT",
    -704            exp.DataType.Type.UTINYINT: "TINYINT",
    -705            exp.DataType.Type.UDECIMAL: "DECIMAL",
    -706        }
    -707
    -708        TIMESTAMP_TYPE_MAPPING = {
    -709            exp.DataType.Type.TIMESTAMP: "DATETIME",
    -710            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -711            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    -712        }
    -713
    -714        TYPE_MAPPING = {
    -715            **generator.Generator.TYPE_MAPPING,
    -716            **UNSIGNED_TYPE_MAPPING,
    -717            **TIMESTAMP_TYPE_MAPPING,
    -718        }
    -719
    -720        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    -721        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    -722        TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT)
    -723        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    -724        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
    -725        TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB)
    -726
    -727        PROPERTIES_LOCATION = {
    -728            **generator.Generator.PROPERTIES_LOCATION,
    -729            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    -730            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -731        }
    -732
    -733        LIMIT_FETCH = "LIMIT"
    -734
    -735        LIMIT_ONLY_LITERALS = True
    -736
    -737        # MySQL doesn't support many datatypes in cast.
    -738        # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast
    -739        CAST_MAPPING = {
    -740            exp.DataType.Type.BIGINT: "SIGNED",
    -741            exp.DataType.Type.BOOLEAN: "SIGNED",
    -742            exp.DataType.Type.INT: "SIGNED",
    -743            exp.DataType.Type.TEXT: "CHAR",
    -744            exp.DataType.Type.UBIGINT: "UNSIGNED",
    -745            exp.DataType.Type.VARCHAR: "CHAR",
    -746        }
    -747
    -748        TIMESTAMP_FUNC_TYPES = {
    -749            exp.DataType.Type.TIMESTAMPTZ,
    -750            exp.DataType.Type.TIMESTAMPLTZ,
    -751        }
    -752
    -753        def datatype_sql(self, expression: exp.DataType) -> str:
    -754            # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html
    -755            result = super().datatype_sql(expression)
    -756            if expression.this in self.UNSIGNED_TYPE_MAPPING:
    -757                result = f"{result} UNSIGNED"
    -758            return result
    -759
    -760        def xor_sql(self, expression: exp.Xor) -> str:
    -761            if expression.expressions:
    -762                return self.expressions(expression, sep=" XOR ")
    -763            return super().xor_sql(expression)
    -764
    -765        def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
    -766            return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
    -767
    -768        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -769            if expression.to.this in self.TIMESTAMP_FUNC_TYPES:
    -770                return self.func("TIMESTAMP", expression.this)
    -771
    -772            to = self.CAST_MAPPING.get(expression.to.this)
    -773
    -774            if to:
    -775                expression.to.set("this", to)
    -776            return super().cast_sql(expression)
    -777
    -778        def show_sql(self, expression: exp.Show) -> str:
    -779            this = f" {expression.name}"
    -780            full = " FULL" if expression.args.get("full") else ""
    -781            global_ = " GLOBAL" if expression.args.get("global") else ""
    -782
    -783            target = self.sql(expression, "target")
    -784            target = f" {target}" if target else ""
    -785            if expression.name in ("COLUMNS", "INDEX"):
    -786                target = f" FROM{target}"
    -787            elif expression.name == "GRANTS":
    -788                target = f" FOR{target}"
    -789
    -790            db = self._prefixed_sql("FROM", expression, "db")
    -791
    -792            like = self._prefixed_sql("LIKE", expression, "like")
    -793            where = self.sql(expression, "where")
    -794
    -795            types = self.expressions(expression, key="types")
    -796            types = f" {types}" if types else types
    -797            query = self._prefixed_sql("FOR QUERY", expression, "query")
    -798
    -799            if expression.name == "PROFILE":
    -800                offset = self._prefixed_sql("OFFSET", expression, "offset")
    -801                limit = self._prefixed_sql("LIMIT", expression, "limit")
    -802            else:
    -803                offset = ""
    -804                limit = self._oldstyle_limit_sql(expression)
    -805
    -806            log = self._prefixed_sql("IN", expression, "log")
    -807            position = self._prefixed_sql("FROM", expression, "position")
    -808
    -809            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    -810
    -811            if expression.name == "ENGINE":
    -812                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    -813            else:
    -814                mutex_or_status = ""
    -815
    -816            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    -817
    -818        def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    -819            dtype = self.sql(expression, "dtype")
    -820            if not dtype:
    -821                return super().altercolumn_sql(expression)
    -822
    -823            this = self.sql(expression, "this")
    -824            return f"MODIFY COLUMN {this} {dtype}"
    -825
    -826        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    -827            sql = self.sql(expression, arg)
    -828            return f" {prefix} {sql}" if sql else ""
    -829
    -830        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    -831            limit = self.sql(expression, "limit")
    -832            offset = self.sql(expression, "offset")
    -833            if limit:
    -834                limit_offset = f"{offset}, {limit}" if offset else limit
    -835                return f" LIMIT {limit_offset}"
    -836            return ""
    -837
    -838        def chr_sql(self, expression: exp.Chr) -> str:
    -839            this = self.expressions(sqls=[expression.this] + expression.expressions)
    -840            charset = expression.args.get("charset")
    -841            using = f" USING {self.sql(charset)}" if charset else ""
    -842            return f"CHAR({this}{using})"
    +            
    627    class Generator(generator.Generator):
    +628        LOCKING_READS_SUPPORTED = True
    +629        NULL_ORDERING_SUPPORTED = None
    +630        JOIN_HINTS = False
    +631        TABLE_HINTS = True
    +632        DUPLICATE_KEY_UPDATE_WITH_SET = False
    +633        QUERY_HINT_SEP = " "
    +634        VALUES_AS_TABLE = False
    +635        NVL2_SUPPORTED = False
    +636        LAST_DAY_SUPPORTS_DATE_PART = False
    +637        JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
    +638        JSON_PATH_BRACKETED_KEY_SUPPORTED = False
    +639        JSON_KEY_VALUE_PAIR_SEP = ","
    +640
    +641        TRANSFORMS = {
    +642            **generator.Generator.TRANSFORMS,
    +643            exp.CurrentDate: no_paren_current_date_sql,
    +644            exp.DateDiff: _remove_ts_or_ds_to_date(
    +645                lambda self, e: self.func("DATEDIFF", e.this, e.expression), ("this", "expression")
    +646            ),
    +647            exp.DateAdd: _remove_ts_or_ds_to_date(_date_add_sql("ADD")),
    +648            exp.DateStrToDate: datestrtodate_sql,
    +649            exp.DateSub: _remove_ts_or_ds_to_date(_date_add_sql("SUB")),
    +650            exp.DateTrunc: _date_trunc_sql,
    +651            exp.Day: _remove_ts_or_ds_to_date(),
    +652            exp.DayOfMonth: _remove_ts_or_ds_to_date(rename_func("DAYOFMONTH")),
    +653            exp.DayOfWeek: _remove_ts_or_ds_to_date(rename_func("DAYOFWEEK")),
    +654            exp.DayOfYear: _remove_ts_or_ds_to_date(rename_func("DAYOFYEAR")),
    +655            exp.GroupConcat: lambda self,
    +656            e: f"""GROUP_CONCAT({self.sql(e, "this")} SEPARATOR {self.sql(e, "separator") or "','"})""",
    +657            exp.ILike: no_ilike_sql,
    +658            exp.JSONExtractScalar: arrow_json_extract_sql,
    +659            exp.Max: max_or_greatest,
    +660            exp.Min: min_or_least,
    +661            exp.Month: _remove_ts_or_ds_to_date(),
    +662            exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
    +663            exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}",
    +664            exp.ParseJSON: lambda self, e: self.sql(e, "this"),
    +665            exp.Pivot: no_pivot_sql,
    +666            exp.Select: transforms.preprocess(
    +667                [
    +668                    transforms.eliminate_distinct_on,
    +669                    transforms.eliminate_semi_and_anti_joins,
    +670                    transforms.eliminate_qualify,
    +671                    transforms.eliminate_full_outer_join,
    +672                ]
    +673            ),
    +674            exp.StrPosition: strposition_to_locate_sql,
    +675            exp.StrToDate: _str_to_date_sql,
    +676            exp.StrToTime: _str_to_date_sql,
    +677            exp.Stuff: rename_func("INSERT"),
    +678            exp.TableSample: no_tablesample_sql,
    +679            exp.TimeFromParts: rename_func("MAKETIME"),
    +680            exp.TimestampAdd: date_add_interval_sql("DATE", "ADD"),
    +681            exp.TimestampDiff: lambda self, e: self.func(
    +682                "TIMESTAMPDIFF", e.text("unit"), e.expression, e.this
    +683            ),
    +684            exp.TimestampSub: date_add_interval_sql("DATE", "SUB"),
    +685            exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
    +686            exp.TimeStrToTime: lambda self, e: self.sql(exp.cast(e.this, "datetime", copy=True)),
    +687            exp.TimeToStr: _remove_ts_or_ds_to_date(
    +688                lambda self, e: self.func("DATE_FORMAT", e.this, self.format_time(e))
    +689            ),
    +690            exp.Trim: _trim_sql,
    +691            exp.TryCast: no_trycast_sql,
    +692            exp.TsOrDsAdd: _date_add_sql("ADD"),
    +693            exp.TsOrDsDiff: lambda self, e: self.func("DATEDIFF", e.this, e.expression),
    +694            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    +695            exp.Week: _remove_ts_or_ds_to_date(),
    +696            exp.WeekOfYear: _remove_ts_or_ds_to_date(rename_func("WEEKOFYEAR")),
    +697            exp.Year: _remove_ts_or_ds_to_date(),
    +698        }
    +699
    +700        UNSIGNED_TYPE_MAPPING = {
    +701            exp.DataType.Type.UBIGINT: "BIGINT",
    +702            exp.DataType.Type.UINT: "INT",
    +703            exp.DataType.Type.UMEDIUMINT: "MEDIUMINT",
    +704            exp.DataType.Type.USMALLINT: "SMALLINT",
    +705            exp.DataType.Type.UTINYINT: "TINYINT",
    +706            exp.DataType.Type.UDECIMAL: "DECIMAL",
    +707        }
    +708
    +709        TIMESTAMP_TYPE_MAPPING = {
    +710            exp.DataType.Type.TIMESTAMP: "DATETIME",
    +711            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +712            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
    +713        }
    +714
    +715        TYPE_MAPPING = {
    +716            **generator.Generator.TYPE_MAPPING,
    +717            **UNSIGNED_TYPE_MAPPING,
    +718            **TIMESTAMP_TYPE_MAPPING,
    +719        }
    +720
    +721        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMTEXT)
    +722        TYPE_MAPPING.pop(exp.DataType.Type.LONGTEXT)
    +723        TYPE_MAPPING.pop(exp.DataType.Type.TINYTEXT)
    +724        TYPE_MAPPING.pop(exp.DataType.Type.MEDIUMBLOB)
    +725        TYPE_MAPPING.pop(exp.DataType.Type.LONGBLOB)
    +726        TYPE_MAPPING.pop(exp.DataType.Type.TINYBLOB)
    +727
    +728        PROPERTIES_LOCATION = {
    +729            **generator.Generator.PROPERTIES_LOCATION,
    +730            exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    +731            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +732        }
    +733
    +734        LIMIT_FETCH = "LIMIT"
    +735
    +736        LIMIT_ONLY_LITERALS = True
    +737
    +738        # MySQL doesn't support many datatypes in cast.
    +739        # https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast
    +740        CAST_MAPPING = {
    +741            exp.DataType.Type.BIGINT: "SIGNED",
    +742            exp.DataType.Type.BOOLEAN: "SIGNED",
    +743            exp.DataType.Type.INT: "SIGNED",
    +744            exp.DataType.Type.TEXT: "CHAR",
    +745            exp.DataType.Type.UBIGINT: "UNSIGNED",
    +746            exp.DataType.Type.VARCHAR: "CHAR",
    +747        }
    +748
    +749        TIMESTAMP_FUNC_TYPES = {
    +750            exp.DataType.Type.TIMESTAMPTZ,
    +751            exp.DataType.Type.TIMESTAMPLTZ,
    +752        }
    +753
    +754        def datatype_sql(self, expression: exp.DataType) -> str:
    +755            # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html
    +756            result = super().datatype_sql(expression)
    +757            if expression.this in self.UNSIGNED_TYPE_MAPPING:
    +758                result = f"{result} UNSIGNED"
    +759            return result
    +760
    +761        def xor_sql(self, expression: exp.Xor) -> str:
    +762            if expression.expressions:
    +763                return self.expressions(expression, sep=" XOR ")
    +764            return super().xor_sql(expression)
    +765
    +766        def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
    +767            return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
    +768
    +769        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +770            if expression.to.this in self.TIMESTAMP_FUNC_TYPES:
    +771                return self.func("TIMESTAMP", expression.this)
    +772
    +773            to = self.CAST_MAPPING.get(expression.to.this)
    +774
    +775            if to:
    +776                expression.to.set("this", to)
    +777            return super().cast_sql(expression)
    +778
    +779        def show_sql(self, expression: exp.Show) -> str:
    +780            this = f" {expression.name}"
    +781            full = " FULL" if expression.args.get("full") else ""
    +782            global_ = " GLOBAL" if expression.args.get("global") else ""
    +783
    +784            target = self.sql(expression, "target")
    +785            target = f" {target}" if target else ""
    +786            if expression.name in ("COLUMNS", "INDEX"):
    +787                target = f" FROM{target}"
    +788            elif expression.name == "GRANTS":
    +789                target = f" FOR{target}"
    +790
    +791            db = self._prefixed_sql("FROM", expression, "db")
    +792
    +793            like = self._prefixed_sql("LIKE", expression, "like")
    +794            where = self.sql(expression, "where")
    +795
    +796            types = self.expressions(expression, key="types")
    +797            types = f" {types}" if types else types
    +798            query = self._prefixed_sql("FOR QUERY", expression, "query")
    +799
    +800            if expression.name == "PROFILE":
    +801                offset = self._prefixed_sql("OFFSET", expression, "offset")
    +802                limit = self._prefixed_sql("LIMIT", expression, "limit")
    +803            else:
    +804                offset = ""
    +805                limit = self._oldstyle_limit_sql(expression)
    +806
    +807            log = self._prefixed_sql("IN", expression, "log")
    +808            position = self._prefixed_sql("FROM", expression, "position")
    +809
    +810            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    +811
    +812            if expression.name == "ENGINE":
    +813                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    +814            else:
    +815                mutex_or_status = ""
    +816
    +817            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    +818
    +819        def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    +820            dtype = self.sql(expression, "dtype")
    +821            if not dtype:
    +822                return super().altercolumn_sql(expression)
    +823
    +824            this = self.sql(expression, "this")
    +825            return f"MODIFY COLUMN {this} {dtype}"
    +826
    +827        def _prefixed_sql(self, prefix: str, expression: exp.Expression, arg: str) -> str:
    +828            sql = self.sql(expression, arg)
    +829            return f" {prefix} {sql}" if sql else ""
    +830
    +831        def _oldstyle_limit_sql(self, expression: exp.Show) -> str:
    +832            limit = self.sql(expression, "limit")
    +833            offset = self.sql(expression, "offset")
    +834            if limit:
    +835                limit_offset = f"{offset}, {limit}" if offset else limit
    +836                return f" LIMIT {limit_offset}"
    +837            return ""
    +838
    +839        def chr_sql(self, expression: exp.Chr) -> str:
    +840            this = self.expressions(sqls=[expression.this] + expression.expressions)
    +841            charset = expression.args.get("charset")
    +842            using = f" USING {self.sql(charset)}" if charset else ""
    +843            return f"CHAR({this}{using})"
     
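For orientation, the class-level mappings in the listing above surface directly through sqlglot's public transpile API. A minimal sketch follows; the table and column names are hypothetical and the expected outputs in the comments are indicative of 21.x behaviour, not copied from a test suite.

    import sqlglot

    # TIMESTAMP_TYPE_MAPPING: a plain TIMESTAMP column is written as DATETIME.
    print(sqlglot.transpile("CREATE TABLE t (c TIMESTAMP)", write="mysql")[0])
    # expected (roughly): CREATE TABLE t (c DATETIME)

    # The popped TEXT/BLOB entries let MEDIUMTEXT, LONGTEXT, TINYBLOB, etc.
    # pass through under their own names rather than being rewritten.
    print(sqlglot.transpile("CREATE TABLE t (c MEDIUMTEXT)", read="mysql", write="mysql")[0])
    # expected (roughly): CREATE TABLE t (c MEDIUMTEXT)
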
    @@ -3749,7 +3767,7 @@ Default: True
    TIMESTAMP_FUNC_TYPES = -{<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>} +{<Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>}
    @@ -3769,12 +3787,12 @@ Default: True
    -
    753        def datatype_sql(self, expression: exp.DataType) -> str:
    -754            # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html
    -755            result = super().datatype_sql(expression)
    -756            if expression.this in self.UNSIGNED_TYPE_MAPPING:
    -757                result = f"{result} UNSIGNED"
    -758            return result
    +            
    754        def datatype_sql(self, expression: exp.DataType) -> str:
    +755            # https://dev.mysql.com/doc/refman/8.0/en/numeric-type-syntax.html
    +756            result = super().datatype_sql(expression)
    +757            if expression.this in self.UNSIGNED_TYPE_MAPPING:
    +758                result = f"{result} UNSIGNED"
    +759            return result
     
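datatype_sql appends the UNSIGNED modifier for the unsigned integer types collected in UNSIGNED_TYPE_MAPPING. A hedged round-trip sketch, with a hypothetical table name and an approximate output:

    import sqlglot

    # An unsigned MySQL column parses to an unsigned DataType and is
    # re-rendered with the UNSIGNED suffix by datatype_sql.
    print(sqlglot.transpile("CREATE TABLE t (c INT UNSIGNED)", read="mysql", write="mysql")[0])
    # expected (roughly): CREATE TABLE t (c INT UNSIGNED)
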
    @@ -3792,10 +3810,10 @@ Default: True
    -
    760        def xor_sql(self, expression: exp.Xor) -> str:
    -761            if expression.expressions:
    -762                return self.expressions(expression, sep=" XOR ")
    -763            return super().xor_sql(expression)
    +            
    761        def xor_sql(self, expression: exp.Xor) -> str:
    +762            if expression.expressions:
    +763                return self.expressions(expression, sep=" XOR ")
    +764            return super().xor_sql(expression)
     
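xor_sql joins a flattened exp.Xor's operands with the XOR keyword and otherwise defers to the base binary rendering. Building the flattened form by hand is purely for illustration; the column names are hypothetical:

    from sqlglot import exp

    # A variadic Xor node (expressions list populated) is joined with " XOR ".
    node = exp.Xor(expressions=[exp.column("a"), exp.column("b"), exp.column("c")])
    print(node.sql(dialect="mysql"))
    # expected (roughly): a XOR b XOR c
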
    @@ -3813,8 +3831,8 @@ Default: True
    -
    765        def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
    -766            return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
    +            
    766        def jsonarraycontains_sql(self, expression: exp.JSONArrayContains) -> str:
    +767            return f"{self.sql(expression, 'this')} MEMBER OF({self.sql(expression, 'expression')})"
     
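jsonarraycontains_sql maps exp.JSONArrayContains onto MySQL's MEMBER OF operator. Constructing the node directly is only to show the rendering (the parser normally produces it); the column name is hypothetical and the output approximate:

    from sqlglot import exp

    node = exp.JSONArrayContains(
        this=exp.Literal.number(17),   # candidate value
        expression=exp.column("j"),    # JSON array column
    )
    print(node.sql(dialect="mysql"))
    # expected (roughly): 17 MEMBER OF(j)
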
    @@ -3832,15 +3850,15 @@ Default: True
    -
    768        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -769            if expression.to.this in self.TIMESTAMP_FUNC_TYPES:
    -770                return self.func("TIMESTAMP", expression.this)
    -771
    -772            to = self.CAST_MAPPING.get(expression.to.this)
    -773
    -774            if to:
    -775                expression.to.set("this", to)
    -776            return super().cast_sql(expression)
    +            
    769        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +770            if expression.to.this in self.TIMESTAMP_FUNC_TYPES:
    +771                return self.func("TIMESTAMP", expression.this)
    +772
    +773            to = self.CAST_MAPPING.get(expression.to.this)
    +774
    +775            if to:
    +776                expression.to.set("this", to)
    +777            return super().cast_sql(expression)
     
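cast_sql narrows the cast target to the handful of types MySQL's CAST accepts (CAST_MAPPING) and turns zone-aware casts into the TIMESTAMP() function (TIMESTAMP_FUNC_TYPES). A sketch with approximate outputs:

    import sqlglot

    print(sqlglot.transpile("SELECT CAST(x AS TEXT)", write="mysql")[0])
    # expected (roughly): SELECT CAST(x AS CHAR)

    print(sqlglot.transpile("SELECT CAST(x AS BIGINT)", write="mysql")[0])
    # expected (roughly): SELECT CAST(x AS SIGNED)

    print(sqlglot.transpile("SELECT CAST(x AS TIMESTAMPTZ)", write="mysql")[0])
    # expected (roughly): SELECT TIMESTAMP(x)
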
    @@ -3858,45 +3876,45 @@ Default: True
    -
    778        def show_sql(self, expression: exp.Show) -> str:
    -779            this = f" {expression.name}"
    -780            full = " FULL" if expression.args.get("full") else ""
    -781            global_ = " GLOBAL" if expression.args.get("global") else ""
    -782
    -783            target = self.sql(expression, "target")
    -784            target = f" {target}" if target else ""
    -785            if expression.name in ("COLUMNS", "INDEX"):
    -786                target = f" FROM{target}"
    -787            elif expression.name == "GRANTS":
    -788                target = f" FOR{target}"
    -789
    -790            db = self._prefixed_sql("FROM", expression, "db")
    -791
    -792            like = self._prefixed_sql("LIKE", expression, "like")
    -793            where = self.sql(expression, "where")
    -794
    -795            types = self.expressions(expression, key="types")
    -796            types = f" {types}" if types else types
    -797            query = self._prefixed_sql("FOR QUERY", expression, "query")
    -798
    -799            if expression.name == "PROFILE":
    -800                offset = self._prefixed_sql("OFFSET", expression, "offset")
    -801                limit = self._prefixed_sql("LIMIT", expression, "limit")
    -802            else:
    -803                offset = ""
    -804                limit = self._oldstyle_limit_sql(expression)
    -805
    -806            log = self._prefixed_sql("IN", expression, "log")
    -807            position = self._prefixed_sql("FROM", expression, "position")
    -808
    -809            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    -810
    -811            if expression.name == "ENGINE":
    -812                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    -813            else:
    -814                mutex_or_status = ""
    -815
    -816            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
    +            
    779        def show_sql(self, expression: exp.Show) -> str:
    +780            this = f" {expression.name}"
    +781            full = " FULL" if expression.args.get("full") else ""
    +782            global_ = " GLOBAL" if expression.args.get("global") else ""
    +783
    +784            target = self.sql(expression, "target")
    +785            target = f" {target}" if target else ""
    +786            if expression.name in ("COLUMNS", "INDEX"):
    +787                target = f" FROM{target}"
    +788            elif expression.name == "GRANTS":
    +789                target = f" FOR{target}"
    +790
    +791            db = self._prefixed_sql("FROM", expression, "db")
    +792
    +793            like = self._prefixed_sql("LIKE", expression, "like")
    +794            where = self.sql(expression, "where")
    +795
    +796            types = self.expressions(expression, key="types")
    +797            types = f" {types}" if types else types
    +798            query = self._prefixed_sql("FOR QUERY", expression, "query")
    +799
    +800            if expression.name == "PROFILE":
    +801                offset = self._prefixed_sql("OFFSET", expression, "offset")
    +802                limit = self._prefixed_sql("LIMIT", expression, "limit")
    +803            else:
    +804                offset = ""
    +805                limit = self._oldstyle_limit_sql(expression)
    +806
    +807            log = self._prefixed_sql("IN", expression, "log")
    +808            position = self._prefixed_sql("FROM", expression, "position")
    +809
    +810            channel = self._prefixed_sql("FOR CHANNEL", expression, "channel")
    +811
    +812            if expression.name == "ENGINE":
    +813                mutex_or_status = " MUTEX" if expression.args.get("mutex") else " STATUS"
    +814            else:
    +815                mutex_or_status = ""
    +816
    +817            return f"SHOW{full}{global_}{this}{target}{types}{db}{query}{log}{position}{channel}{mutex_or_status}{like}{where}{offset}{limit}"
     
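show_sql reassembles the many optional SHOW clauses in MySQL's fixed order. A hedged round-trip sketch with hypothetical table, database, and pattern names; the output is expected to be close to the input but is not guaranteed verbatim:

    import sqlglot

    sql = "SHOW FULL COLUMNS FROM t FROM mydb LIKE 'a%'"
    print(sqlglot.transpile(sql, read="mysql", write="mysql")[0])
    # expected (roughly): SHOW FULL COLUMNS FROM t FROM mydb LIKE 'a%'
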
    @@ -3914,13 +3932,13 @@ Default: True
    -
    818        def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    -819            dtype = self.sql(expression, "dtype")
    -820            if not dtype:
    -821                return super().altercolumn_sql(expression)
    -822
    -823            this = self.sql(expression, "this")
    -824            return f"MODIFY COLUMN {this} {dtype}"
    +            
    819        def altercolumn_sql(self, expression: exp.AlterColumn) -> str:
    +820            dtype = self.sql(expression, "dtype")
    +821            if not dtype:
    +822                return super().altercolumn_sql(expression)
    +823
    +824            this = self.sql(expression, "this")
    +825            return f"MODIFY COLUMN {this} {dtype}"
     
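altercolumn_sql rewrites a column type change into MySQL's MODIFY COLUMN form. A sketch reading the PostgreSQL spelling, with hypothetical identifiers and an approximate output:

    import sqlglot

    sql = "ALTER TABLE t ALTER COLUMN c TYPE BIGINT"
    print(sqlglot.transpile(sql, read="postgres", write="mysql")[0])
    # expected (roughly): ALTER TABLE t MODIFY COLUMN c BIGINT
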
    @@ -3938,11 +3956,11 @@ Default: True
    -
    838        def chr_sql(self, expression: exp.Chr) -> str:
    -839            this = self.expressions(sqls=[expression.this] + expression.expressions)
    -840            charset = expression.args.get("charset")
    -841            using = f" USING {self.sql(charset)}" if charset else ""
    -842            return f"CHAR({this}{using})"
    +            
    839        def chr_sql(self, expression: exp.Chr) -> str:
    +840            this = self.expressions(sqls=[expression.this] + expression.expressions)
    +841            charset = expression.args.get("charset")
    +842            using = f" USING {self.sql(charset)}" if charset else ""
    +843            return f"CHAR({this}{using})"
     
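chr_sql emits MySQL's CHAR(...) form with an optional USING charset clause. Building the node directly is only for illustration; the "charset" argument name follows the args.get("charset") lookup above, and the literal and charset values are hypothetical:

    from sqlglot import exp

    node = exp.Chr(this=exp.Literal.number(77), charset=exp.var("utf8mb4"))
    print(node.sql(dialect="mysql"))
    # expected (roughly): CHAR(77 USING utf8mb4)
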
    diff --git a/docs/sqlglot/dialects/oracle.html b/docs/sqlglot/dialects/oracle.html index 829c96e..5ab5f8a 100644 --- a/docs/sqlglot/dialects/oracle.html +++ b/docs/sqlglot/dialects/oracle.html @@ -60,6 +60,9 @@
  • WINDOW_BEFORE_PAREN_TOKENS
  • +
  • + VALUES_FOLLOWED_BY_PAREN +
  • FUNCTIONS
  • @@ -335,179 +338,180 @@
    88 class Parser(parser.Parser): 89 ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False 90 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} - 91 - 92 FUNCTIONS = { - 93 **parser.Parser.FUNCTIONS, - 94 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), - 95 "TO_CHAR": to_char, - 96 "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"), - 97 "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"), - 98 } - 99 -100 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { -101 **parser.Parser.FUNCTION_PARSERS, -102 "JSON_ARRAY": lambda self: self._parse_json_array( -103 exp.JSONArray, -104 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), -105 ), -106 "JSON_ARRAYAGG": lambda self: self._parse_json_array( -107 exp.JSONArrayAgg, -108 this=self._parse_format_json(self._parse_bitwise()), -109 order=self._parse_order(), -110 ), -111 "XMLTABLE": _parse_xml_table, -112 } -113 -114 QUERY_MODIFIER_PARSERS = { -115 **parser.Parser.QUERY_MODIFIER_PARSERS, -116 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), -117 } -118 -119 TYPE_LITERAL_PARSERS = { -120 exp.DataType.Type.DATE: lambda self, this, _: self.expression( -121 exp.DateStrToDate, this=this -122 ) -123 } -124 -125 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. -126 # Reference: https://stackoverflow.com/a/336455 -127 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} -128 -129 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: -130 return self.expression( -131 expr_type, -132 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), -133 return_type=self._match_text_seq("RETURNING") and self._parse_type(), -134 strict=self._match_text_seq("STRICT"), -135 **kwargs, -136 ) -137 -138 def _parse_column(self) -> t.Optional[exp.Expression]: -139 column = super()._parse_column() -140 if column: -141 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) -142 return column -143 -144 def _parse_hint(self) -> t.Optional[exp.Hint]: -145 if self._match(TokenType.HINT): -146 start = self._curr -147 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): -148 self._advance() -149 -150 if not self._curr: -151 self.raise_error("Expected */ after HINT") -152 -153 end = self._tokens[self._index - 3] -154 return exp.Hint(expressions=[self._find_sql(start, end)]) -155 -156 return None -157 -158 class Generator(generator.Generator): -159 LOCKING_READS_SUPPORTED = True -160 JOIN_HINTS = False -161 TABLE_HINTS = False -162 COLUMN_JOIN_MARKS_SUPPORTED = True -163 DATA_TYPE_SPECIFIERS_ALLOWED = True -164 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False -165 LIMIT_FETCH = "FETCH" -166 TABLESAMPLE_KEYWORDS = "SAMPLE" -167 LAST_DAY_SUPPORTS_DATE_PART = False -168 SUPPORTS_SELECT_INTO = True -169 TZ_TO_WITH_TIME_ZONE = True -170 -171 TYPE_MAPPING = { -172 **generator.Generator.TYPE_MAPPING, -173 exp.DataType.Type.TINYINT: "NUMBER", -174 exp.DataType.Type.SMALLINT: "NUMBER", -175 exp.DataType.Type.INT: "NUMBER", -176 exp.DataType.Type.BIGINT: "NUMBER", -177 exp.DataType.Type.DECIMAL: "NUMBER", -178 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", -179 exp.DataType.Type.VARCHAR: "VARCHAR2", -180 exp.DataType.Type.NVARCHAR: "NVARCHAR2", -181 exp.DataType.Type.NCHAR: "NCHAR", -182 exp.DataType.Type.TEXT: "CLOB", -183 exp.DataType.Type.TIMETZ: "TIME", -184 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -185 exp.DataType.Type.BINARY: "BLOB", -186 exp.DataType.Type.VARBINARY: "BLOB", -187 } -188 -189 TRANSFORMS = { 
-190 **generator.Generator.TRANSFORMS, -191 exp.DateStrToDate: lambda self, e: self.func( -192 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") -193 ), -194 exp.Group: transforms.preprocess([transforms.unalias_group]), -195 exp.ILike: no_ilike_sql, -196 exp.Select: transforms.preprocess( -197 [ -198 transforms.eliminate_distinct_on, -199 transforms.eliminate_qualify, -200 ] -201 ), -202 exp.StrToTime: lambda self, -203 e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -204 exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, {self.format_time(e)})", -205 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), -206 exp.Substring: rename_func("SUBSTR"), -207 exp.Table: lambda self, e: self.table_sql(e, sep=" "), -208 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), -209 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", -210 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -211 exp.Trim: trim_sql, -212 exp.UnixToTime: lambda self, -213 e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", -214 } -215 -216 PROPERTIES_LOCATION = { -217 **generator.Generator.PROPERTIES_LOCATION, -218 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -219 } -220 -221 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: -222 this = expression.this -223 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" -224 -225 def offset_sql(self, expression: exp.Offset) -> str: -226 return f"{super().offset_sql(expression)} ROWS" -227 -228 def xmltable_sql(self, expression: exp.XMLTable) -> str: -229 this = self.sql(expression, "this") -230 passing = self.expressions(expression, key="passing") -231 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" -232 columns = self.expressions(expression, key="columns") -233 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" -234 by_ref = ( -235 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" -236 ) -237 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" -238 -239 def add_column_sql(self, expression: exp.AlterTable) -> str: -240 actions = self.expressions(expression, key="actions", flat=True) -241 if len(expression.args.get("actions", [])) > 1: -242 return f"ADD ({actions})" -243 return f"ADD {actions}" -244 -245 class Tokenizer(tokens.Tokenizer): -246 VAR_SINGLE_TOKENS = {"@", "$", "#"} -247 -248 KEYWORDS = { -249 **tokens.Tokenizer.KEYWORDS, -250 "(+)": TokenType.JOIN_MARKER, -251 "BINARY_DOUBLE": TokenType.DOUBLE, -252 "BINARY_FLOAT": TokenType.FLOAT, -253 "COLUMNS": TokenType.COLUMN, -254 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, -255 "MINUS": TokenType.EXCEPT, -256 "NVARCHAR2": TokenType.NVARCHAR, -257 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, -258 "SAMPLE": TokenType.TABLE_SAMPLE, -259 "START": TokenType.BEGIN, -260 "SYSDATE": TokenType.CURRENT_TIMESTAMP, -261 "TOP": TokenType.TOP, -262 "VARCHAR2": TokenType.VARCHAR, -263 } + 91 VALUES_FOLLOWED_BY_PAREN = False + 92 + 93 FUNCTIONS = { + 94 **parser.Parser.FUNCTIONS, + 95 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), + 96 "TO_CHAR": to_char, + 97 "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"), + 98 "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"), + 99 } +100 +101 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { +102 **parser.Parser.FUNCTION_PARSERS, +103 
"JSON_ARRAY": lambda self: self._parse_json_array( +104 exp.JSONArray, +105 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), +106 ), +107 "JSON_ARRAYAGG": lambda self: self._parse_json_array( +108 exp.JSONArrayAgg, +109 this=self._parse_format_json(self._parse_bitwise()), +110 order=self._parse_order(), +111 ), +112 "XMLTABLE": _parse_xml_table, +113 } +114 +115 QUERY_MODIFIER_PARSERS = { +116 **parser.Parser.QUERY_MODIFIER_PARSERS, +117 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), +118 } +119 +120 TYPE_LITERAL_PARSERS = { +121 exp.DataType.Type.DATE: lambda self, this, _: self.expression( +122 exp.DateStrToDate, this=this +123 ) +124 } +125 +126 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. +127 # Reference: https://stackoverflow.com/a/336455 +128 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} +129 +130 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: +131 return self.expression( +132 expr_type, +133 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), +134 return_type=self._match_text_seq("RETURNING") and self._parse_type(), +135 strict=self._match_text_seq("STRICT"), +136 **kwargs, +137 ) +138 +139 def _parse_column(self) -> t.Optional[exp.Expression]: +140 column = super()._parse_column() +141 if column: +142 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) +143 return column +144 +145 def _parse_hint(self) -> t.Optional[exp.Hint]: +146 if self._match(TokenType.HINT): +147 start = self._curr +148 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): +149 self._advance() +150 +151 if not self._curr: +152 self.raise_error("Expected */ after HINT") +153 +154 end = self._tokens[self._index - 3] +155 return exp.Hint(expressions=[self._find_sql(start, end)]) +156 +157 return None +158 +159 class Generator(generator.Generator): +160 LOCKING_READS_SUPPORTED = True +161 JOIN_HINTS = False +162 TABLE_HINTS = False +163 COLUMN_JOIN_MARKS_SUPPORTED = True +164 DATA_TYPE_SPECIFIERS_ALLOWED = True +165 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False +166 LIMIT_FETCH = "FETCH" +167 TABLESAMPLE_KEYWORDS = "SAMPLE" +168 LAST_DAY_SUPPORTS_DATE_PART = False +169 SUPPORTS_SELECT_INTO = True +170 TZ_TO_WITH_TIME_ZONE = True +171 +172 TYPE_MAPPING = { +173 **generator.Generator.TYPE_MAPPING, +174 exp.DataType.Type.TINYINT: "NUMBER", +175 exp.DataType.Type.SMALLINT: "NUMBER", +176 exp.DataType.Type.INT: "NUMBER", +177 exp.DataType.Type.BIGINT: "NUMBER", +178 exp.DataType.Type.DECIMAL: "NUMBER", +179 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", +180 exp.DataType.Type.VARCHAR: "VARCHAR2", +181 exp.DataType.Type.NVARCHAR: "NVARCHAR2", +182 exp.DataType.Type.NCHAR: "NCHAR", +183 exp.DataType.Type.TEXT: "CLOB", +184 exp.DataType.Type.TIMETZ: "TIME", +185 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +186 exp.DataType.Type.BINARY: "BLOB", +187 exp.DataType.Type.VARBINARY: "BLOB", +188 } +189 +190 TRANSFORMS = { +191 **generator.Generator.TRANSFORMS, +192 exp.DateStrToDate: lambda self, e: self.func( +193 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") +194 ), +195 exp.Group: transforms.preprocess([transforms.unalias_group]), +196 exp.ILike: no_ilike_sql, +197 exp.Select: transforms.preprocess( +198 [ +199 transforms.eliminate_distinct_on, +200 transforms.eliminate_qualify, +201 ] +202 ), +203 exp.StrToTime: lambda self, +204 e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +205 exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, 
{self.format_time(e)})", +206 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), +207 exp.Substring: rename_func("SUBSTR"), +208 exp.Table: lambda self, e: self.table_sql(e, sep=" "), +209 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), +210 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", +211 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +212 exp.Trim: trim_sql, +213 exp.UnixToTime: lambda self, +214 e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", +215 } +216 +217 PROPERTIES_LOCATION = { +218 **generator.Generator.PROPERTIES_LOCATION, +219 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +220 } +221 +222 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: +223 this = expression.this +224 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" +225 +226 def offset_sql(self, expression: exp.Offset) -> str: +227 return f"{super().offset_sql(expression)} ROWS" +228 +229 def xmltable_sql(self, expression: exp.XMLTable) -> str: +230 this = self.sql(expression, "this") +231 passing = self.expressions(expression, key="passing") +232 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" +233 columns = self.expressions(expression, key="columns") +234 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" +235 by_ref = ( +236 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" +237 ) +238 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" +239 +240 def add_column_sql(self, expression: exp.AlterTable) -> str: +241 actions = self.expressions(expression, key="actions", flat=True) +242 if len(expression.args.get("actions", [])) > 1: +243 return f"ADD ({actions})" +244 return f"ADD {actions}" +245 +246 class Tokenizer(tokens.Tokenizer): +247 VAR_SINGLE_TOKENS = {"@", "$", "#"} +248 +249 KEYWORDS = { +250 **tokens.Tokenizer.KEYWORDS, +251 "(+)": TokenType.JOIN_MARKER, +252 "BINARY_DOUBLE": TokenType.DOUBLE, +253 "BINARY_FLOAT": TokenType.FLOAT, +254 "COLUMNS": TokenType.COLUMN, +255 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, +256 "MINUS": TokenType.EXCEPT, +257 "NVARCHAR2": TokenType.NVARCHAR, +258 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, +259 "SAMPLE": TokenType.TABLE_SAMPLE, +260 "START": TokenType.BEGIN, +261 "SYSDATE": TokenType.CURRENT_TIMESTAMP, +262 "TOP": TokenType.TOP, +263 "VARCHAR2": TokenType.VARCHAR, +264 }
    @@ -589,179 +593,180 @@
    89 class Parser(parser.Parser): 90 ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False 91 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP} - 92 - 93 FUNCTIONS = { - 94 **parser.Parser.FUNCTIONS, - 95 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), - 96 "TO_CHAR": to_char, - 97 "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"), - 98 "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"), - 99 } -100 -101 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { -102 **parser.Parser.FUNCTION_PARSERS, -103 "JSON_ARRAY": lambda self: self._parse_json_array( -104 exp.JSONArray, -105 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), -106 ), -107 "JSON_ARRAYAGG": lambda self: self._parse_json_array( -108 exp.JSONArrayAgg, -109 this=self._parse_format_json(self._parse_bitwise()), -110 order=self._parse_order(), -111 ), -112 "XMLTABLE": _parse_xml_table, -113 } -114 -115 QUERY_MODIFIER_PARSERS = { -116 **parser.Parser.QUERY_MODIFIER_PARSERS, -117 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), -118 } -119 -120 TYPE_LITERAL_PARSERS = { -121 exp.DataType.Type.DATE: lambda self, this, _: self.expression( -122 exp.DateStrToDate, this=this -123 ) -124 } -125 -126 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. -127 # Reference: https://stackoverflow.com/a/336455 -128 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} -129 -130 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: -131 return self.expression( -132 expr_type, -133 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), -134 return_type=self._match_text_seq("RETURNING") and self._parse_type(), -135 strict=self._match_text_seq("STRICT"), -136 **kwargs, -137 ) -138 -139 def _parse_column(self) -> t.Optional[exp.Expression]: -140 column = super()._parse_column() -141 if column: -142 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) -143 return column -144 -145 def _parse_hint(self) -> t.Optional[exp.Hint]: -146 if self._match(TokenType.HINT): -147 start = self._curr -148 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): -149 self._advance() -150 -151 if not self._curr: -152 self.raise_error("Expected */ after HINT") -153 -154 end = self._tokens[self._index - 3] -155 return exp.Hint(expressions=[self._find_sql(start, end)]) -156 -157 return None -158 -159 class Generator(generator.Generator): -160 LOCKING_READS_SUPPORTED = True -161 JOIN_HINTS = False -162 TABLE_HINTS = False -163 COLUMN_JOIN_MARKS_SUPPORTED = True -164 DATA_TYPE_SPECIFIERS_ALLOWED = True -165 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False -166 LIMIT_FETCH = "FETCH" -167 TABLESAMPLE_KEYWORDS = "SAMPLE" -168 LAST_DAY_SUPPORTS_DATE_PART = False -169 SUPPORTS_SELECT_INTO = True -170 TZ_TO_WITH_TIME_ZONE = True -171 -172 TYPE_MAPPING = { -173 **generator.Generator.TYPE_MAPPING, -174 exp.DataType.Type.TINYINT: "NUMBER", -175 exp.DataType.Type.SMALLINT: "NUMBER", -176 exp.DataType.Type.INT: "NUMBER", -177 exp.DataType.Type.BIGINT: "NUMBER", -178 exp.DataType.Type.DECIMAL: "NUMBER", -179 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", -180 exp.DataType.Type.VARCHAR: "VARCHAR2", -181 exp.DataType.Type.NVARCHAR: "NVARCHAR2", -182 exp.DataType.Type.NCHAR: "NCHAR", -183 exp.DataType.Type.TEXT: "CLOB", -184 exp.DataType.Type.TIMETZ: "TIME", -185 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -186 exp.DataType.Type.BINARY: "BLOB", -187 exp.DataType.Type.VARBINARY: "BLOB", -188 } -189 -190 TRANSFORMS = { 
-191 **generator.Generator.TRANSFORMS, -192 exp.DateStrToDate: lambda self, e: self.func( -193 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") -194 ), -195 exp.Group: transforms.preprocess([transforms.unalias_group]), -196 exp.ILike: no_ilike_sql, -197 exp.Select: transforms.preprocess( -198 [ -199 transforms.eliminate_distinct_on, -200 transforms.eliminate_qualify, -201 ] -202 ), -203 exp.StrToTime: lambda self, -204 e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", -205 exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, {self.format_time(e)})", -206 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), -207 exp.Substring: rename_func("SUBSTR"), -208 exp.Table: lambda self, e: self.table_sql(e, sep=" "), -209 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), -210 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", -211 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -212 exp.Trim: trim_sql, -213 exp.UnixToTime: lambda self, -214 e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", -215 } -216 -217 PROPERTIES_LOCATION = { -218 **generator.Generator.PROPERTIES_LOCATION, -219 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -220 } -221 -222 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: -223 this = expression.this -224 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" -225 -226 def offset_sql(self, expression: exp.Offset) -> str: -227 return f"{super().offset_sql(expression)} ROWS" -228 -229 def xmltable_sql(self, expression: exp.XMLTable) -> str: -230 this = self.sql(expression, "this") -231 passing = self.expressions(expression, key="passing") -232 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" -233 columns = self.expressions(expression, key="columns") -234 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" -235 by_ref = ( -236 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" -237 ) -238 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" -239 -240 def add_column_sql(self, expression: exp.AlterTable) -> str: -241 actions = self.expressions(expression, key="actions", flat=True) -242 if len(expression.args.get("actions", [])) > 1: -243 return f"ADD ({actions})" -244 return f"ADD {actions}" -245 -246 class Tokenizer(tokens.Tokenizer): -247 VAR_SINGLE_TOKENS = {"@", "$", "#"} -248 -249 KEYWORDS = { -250 **tokens.Tokenizer.KEYWORDS, -251 "(+)": TokenType.JOIN_MARKER, -252 "BINARY_DOUBLE": TokenType.DOUBLE, -253 "BINARY_FLOAT": TokenType.FLOAT, -254 "COLUMNS": TokenType.COLUMN, -255 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, -256 "MINUS": TokenType.EXCEPT, -257 "NVARCHAR2": TokenType.NVARCHAR, -258 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, -259 "SAMPLE": TokenType.TABLE_SAMPLE, -260 "START": TokenType.BEGIN, -261 "SYSDATE": TokenType.CURRENT_TIMESTAMP, -262 "TOP": TokenType.TOP, -263 "VARCHAR2": TokenType.VARCHAR, -264 } + 92 VALUES_FOLLOWED_BY_PAREN = False + 93 + 94 FUNCTIONS = { + 95 **parser.Parser.FUNCTIONS, + 96 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), + 97 "TO_CHAR": to_char, + 98 "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"), + 99 "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"), +100 } +101 +102 FUNCTION_PARSERS: t.Dict[str, t.Callable] = { +103 **parser.Parser.FUNCTION_PARSERS, +104 
"JSON_ARRAY": lambda self: self._parse_json_array( +105 exp.JSONArray, +106 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())), +107 ), +108 "JSON_ARRAYAGG": lambda self: self._parse_json_array( +109 exp.JSONArrayAgg, +110 this=self._parse_format_json(self._parse_bitwise()), +111 order=self._parse_order(), +112 ), +113 "XMLTABLE": _parse_xml_table, +114 } +115 +116 QUERY_MODIFIER_PARSERS = { +117 **parser.Parser.QUERY_MODIFIER_PARSERS, +118 TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()), +119 } +120 +121 TYPE_LITERAL_PARSERS = { +122 exp.DataType.Type.DATE: lambda self, this, _: self.expression( +123 exp.DateStrToDate, this=this +124 ) +125 } +126 +127 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT .. +128 # Reference: https://stackoverflow.com/a/336455 +129 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE} +130 +131 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E: +132 return self.expression( +133 expr_type, +134 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"), +135 return_type=self._match_text_seq("RETURNING") and self._parse_type(), +136 strict=self._match_text_seq("STRICT"), +137 **kwargs, +138 ) +139 +140 def _parse_column(self) -> t.Optional[exp.Expression]: +141 column = super()._parse_column() +142 if column: +143 column.set("join_mark", self._match(TokenType.JOIN_MARKER)) +144 return column +145 +146 def _parse_hint(self) -> t.Optional[exp.Hint]: +147 if self._match(TokenType.HINT): +148 start = self._curr +149 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH): +150 self._advance() +151 +152 if not self._curr: +153 self.raise_error("Expected */ after HINT") +154 +155 end = self._tokens[self._index - 3] +156 return exp.Hint(expressions=[self._find_sql(start, end)]) +157 +158 return None +159 +160 class Generator(generator.Generator): +161 LOCKING_READS_SUPPORTED = True +162 JOIN_HINTS = False +163 TABLE_HINTS = False +164 COLUMN_JOIN_MARKS_SUPPORTED = True +165 DATA_TYPE_SPECIFIERS_ALLOWED = True +166 ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False +167 LIMIT_FETCH = "FETCH" +168 TABLESAMPLE_KEYWORDS = "SAMPLE" +169 LAST_DAY_SUPPORTS_DATE_PART = False +170 SUPPORTS_SELECT_INTO = True +171 TZ_TO_WITH_TIME_ZONE = True +172 +173 TYPE_MAPPING = { +174 **generator.Generator.TYPE_MAPPING, +175 exp.DataType.Type.TINYINT: "NUMBER", +176 exp.DataType.Type.SMALLINT: "NUMBER", +177 exp.DataType.Type.INT: "NUMBER", +178 exp.DataType.Type.BIGINT: "NUMBER", +179 exp.DataType.Type.DECIMAL: "NUMBER", +180 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION", +181 exp.DataType.Type.VARCHAR: "VARCHAR2", +182 exp.DataType.Type.NVARCHAR: "NVARCHAR2", +183 exp.DataType.Type.NCHAR: "NCHAR", +184 exp.DataType.Type.TEXT: "CLOB", +185 exp.DataType.Type.TIMETZ: "TIME", +186 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +187 exp.DataType.Type.BINARY: "BLOB", +188 exp.DataType.Type.VARBINARY: "BLOB", +189 } +190 +191 TRANSFORMS = { +192 **generator.Generator.TRANSFORMS, +193 exp.DateStrToDate: lambda self, e: self.func( +194 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD") +195 ), +196 exp.Group: transforms.preprocess([transforms.unalias_group]), +197 exp.ILike: no_ilike_sql, +198 exp.Select: transforms.preprocess( +199 [ +200 transforms.eliminate_distinct_on, +201 transforms.eliminate_qualify, +202 ] +203 ), +204 exp.StrToTime: lambda self, +205 e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})", +206 exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, 
{self.format_time(e)})", +207 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "), +208 exp.Substring: rename_func("SUBSTR"), +209 exp.Table: lambda self, e: self.table_sql(e, sep=" "), +210 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "), +211 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})", +212 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +213 exp.Trim: trim_sql, +214 exp.UnixToTime: lambda self, +215 e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)", +216 } +217 +218 PROPERTIES_LOCATION = { +219 **generator.Generator.PROPERTIES_LOCATION, +220 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +221 } +222 +223 def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str: +224 this = expression.this +225 return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP" +226 +227 def offset_sql(self, expression: exp.Offset) -> str: +228 return f"{super().offset_sql(expression)} ROWS" +229 +230 def xmltable_sql(self, expression: exp.XMLTable) -> str: +231 this = self.sql(expression, "this") +232 passing = self.expressions(expression, key="passing") +233 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else "" +234 columns = self.expressions(expression, key="columns") +235 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else "" +236 by_ref = ( +237 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else "" +238 ) +239 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}" +240 +241 def add_column_sql(self, expression: exp.AlterTable) -> str: +242 actions = self.expressions(expression, key="actions", flat=True) +243 if len(expression.args.get("actions", [])) > 1: +244 return f"ADD ({actions})" +245 return f"ADD {actions}" +246 +247 class Tokenizer(tokens.Tokenizer): +248 VAR_SINGLE_TOKENS = {"@", "$", "#"} +249 +250 KEYWORDS = { +251 **tokens.Tokenizer.KEYWORDS, +252 "(+)": TokenType.JOIN_MARKER, +253 "BINARY_DOUBLE": TokenType.DOUBLE, +254 "BINARY_FLOAT": TokenType.FLOAT, +255 "COLUMNS": TokenType.COLUMN, +256 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, +257 "MINUS": TokenType.EXCEPT, +258 "NVARCHAR2": TokenType.NVARCHAR, +259 "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY, +260 "SAMPLE": TokenType.TABLE_SAMPLE, +261 "START": TokenType.BEGIN, +262 "SYSDATE": TokenType.CURRENT_TIMESTAMP, +263 "TOP": TokenType.TOP, +264 "VARCHAR2": TokenType.VARCHAR, +265 }
    @@ -1140,72 +1145,73 @@
     89    class Parser(parser.Parser):
      90        ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN = False
      91        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
    - 92
    - 93        FUNCTIONS = {
    - 94            **parser.Parser.FUNCTIONS,
    - 95            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    - 96            "TO_CHAR": to_char,
    - 97            "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"),
    - 98            "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"),
    - 99        }
    -100
    -101        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    -102            **parser.Parser.FUNCTION_PARSERS,
    -103            "JSON_ARRAY": lambda self: self._parse_json_array(
    -104                exp.JSONArray,
    -105                expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
    -106            ),
    -107            "JSON_ARRAYAGG": lambda self: self._parse_json_array(
    -108                exp.JSONArrayAgg,
    -109                this=self._parse_format_json(self._parse_bitwise()),
    -110                order=self._parse_order(),
    -111            ),
    -112            "XMLTABLE": _parse_xml_table,
    -113        }
    -114
    -115        QUERY_MODIFIER_PARSERS = {
    -116            **parser.Parser.QUERY_MODIFIER_PARSERS,
    -117            TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
    -118        }
    -119
    -120        TYPE_LITERAL_PARSERS = {
    -121            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    -122                exp.DateStrToDate, this=this
    -123            )
    -124        }
    -125
    -126        # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
    -127        # Reference: https://stackoverflow.com/a/336455
    -128        DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
    -129
    -130        def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
    -131            return self.expression(
    -132                expr_type,
    -133                null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
    -134                return_type=self._match_text_seq("RETURNING") and self._parse_type(),
    -135                strict=self._match_text_seq("STRICT"),
    -136                **kwargs,
    -137            )
    -138
    -139        def _parse_column(self) -> t.Optional[exp.Expression]:
    -140            column = super()._parse_column()
    -141            if column:
    -142                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    -143            return column
    -144
    -145        def _parse_hint(self) -> t.Optional[exp.Hint]:
    -146            if self._match(TokenType.HINT):
    -147                start = self._curr
    -148                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    -149                    self._advance()
    -150
    -151                if not self._curr:
    -152                    self.raise_error("Expected */ after HINT")
    -153
    -154                end = self._tokens[self._index - 3]
    -155                return exp.Hint(expressions=[self._find_sql(start, end)])
    -156
    -157            return None
    + 92        VALUES_FOLLOWED_BY_PAREN = False
    + 93
    + 94        FUNCTIONS = {
    + 95            **parser.Parser.FUNCTIONS,
    + 96            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
    + 97            "TO_CHAR": to_char,
    + 98            "TO_TIMESTAMP": format_time_lambda(exp.StrToTime, "oracle"),
    + 99            "TO_DATE": format_time_lambda(exp.StrToDate, "oracle"),
    +100        }
    +101
    +102        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
    +103            **parser.Parser.FUNCTION_PARSERS,
    +104            "JSON_ARRAY": lambda self: self._parse_json_array(
    +105                exp.JSONArray,
    +106                expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
    +107            ),
    +108            "JSON_ARRAYAGG": lambda self: self._parse_json_array(
    +109                exp.JSONArrayAgg,
    +110                this=self._parse_format_json(self._parse_bitwise()),
    +111                order=self._parse_order(),
    +112            ),
    +113            "XMLTABLE": _parse_xml_table,
    +114        }
    +115
    +116        QUERY_MODIFIER_PARSERS = {
    +117            **parser.Parser.QUERY_MODIFIER_PARSERS,
    +118            TokenType.ORDER_SIBLINGS_BY: lambda self: ("order", self._parse_order()),
    +119        }
    +120
    +121        TYPE_LITERAL_PARSERS = {
    +122            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
    +123                exp.DateStrToDate, this=this
    +124            )
    +125        }
    +126
    +127        # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
    +128        # Reference: https://stackoverflow.com/a/336455
    +129        DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
    +130
    +131        def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
    +132            return self.expression(
    +133                expr_type,
    +134                null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
    +135                return_type=self._match_text_seq("RETURNING") and self._parse_type(),
    +136                strict=self._match_text_seq("STRICT"),
    +137                **kwargs,
    +138            )
    +139
    +140        def _parse_column(self) -> t.Optional[exp.Expression]:
    +141            column = super()._parse_column()
    +142            if column:
    +143                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
    +144            return column
    +145
    +146        def _parse_hint(self) -> t.Optional[exp.Hint]:
    +147            if self._match(TokenType.HINT):
    +148                start = self._curr
    +149                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
    +150                    self._advance()
    +151
    +152                if not self._curr:
    +153                    self.raise_error("Expected */ after HINT")
    +154
    +155                end = self._tokens[self._index - 3]
    +156                return exp.Hint(expressions=[self._find_sql(start, end)])
    +157
    +158            return None
     
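Two parser behaviours from the listing above, shown as a hedged sketch with hypothetical tables and columns; the outputs are approximate:

    import sqlglot

    # DISTINCT_TOKENS: old-style SELECT UNIQUE is accepted and normalised.
    print(sqlglot.transpile("SELECT UNIQUE col FROM t", read="oracle", write="oracle")[0])
    # expected (roughly): SELECT DISTINCT col FROM t

    # The "(+)" JOIN_MARKER keyword and _parse_column's join_mark flag keep
    # Oracle's old outer-join marker attached to the column on a round trip.
    print(sqlglot.transpile("SELECT a FROM t, u WHERE t.id = u.id (+)", read="oracle", write="oracle")[0])
    # expected (roughly): SELECT a FROM t, u WHERE t.id = u.id (+)
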
    @@ -1249,6 +1255,18 @@ Default: 3 +
    +
    +
    + VALUES_FOLLOWED_BY_PAREN = +False + + +
    + + + +
    @@ -1304,7 +1322,7 @@ Default: 3
    DISTINCT_TOKENS = -{<TokenType.DISTINCT: 'DISTINCT'>, <TokenType.UNIQUE: 'UNIQUE'>} +{<TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DISTINCT: 'DISTINCT'>}
    @@ -1449,92 +1467,92 @@ Default: 3
    -
    159    class Generator(generator.Generator):
    -160        LOCKING_READS_SUPPORTED = True
    -161        JOIN_HINTS = False
    -162        TABLE_HINTS = False
    -163        COLUMN_JOIN_MARKS_SUPPORTED = True
    -164        DATA_TYPE_SPECIFIERS_ALLOWED = True
    -165        ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False
    -166        LIMIT_FETCH = "FETCH"
    -167        TABLESAMPLE_KEYWORDS = "SAMPLE"
    -168        LAST_DAY_SUPPORTS_DATE_PART = False
    -169        SUPPORTS_SELECT_INTO = True
    -170        TZ_TO_WITH_TIME_ZONE = True
    -171
    -172        TYPE_MAPPING = {
    -173            **generator.Generator.TYPE_MAPPING,
    -174            exp.DataType.Type.TINYINT: "NUMBER",
    -175            exp.DataType.Type.SMALLINT: "NUMBER",
    -176            exp.DataType.Type.INT: "NUMBER",
    -177            exp.DataType.Type.BIGINT: "NUMBER",
    -178            exp.DataType.Type.DECIMAL: "NUMBER",
    -179            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    -180            exp.DataType.Type.VARCHAR: "VARCHAR2",
    -181            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    -182            exp.DataType.Type.NCHAR: "NCHAR",
    -183            exp.DataType.Type.TEXT: "CLOB",
    -184            exp.DataType.Type.TIMETZ: "TIME",
    -185            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -186            exp.DataType.Type.BINARY: "BLOB",
    -187            exp.DataType.Type.VARBINARY: "BLOB",
    -188        }
    -189
    -190        TRANSFORMS = {
    -191            **generator.Generator.TRANSFORMS,
    -192            exp.DateStrToDate: lambda self, e: self.func(
    -193                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    -194            ),
    -195            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -196            exp.ILike: no_ilike_sql,
    -197            exp.Select: transforms.preprocess(
    -198                [
    -199                    transforms.eliminate_distinct_on,
    -200                    transforms.eliminate_qualify,
    -201                ]
    -202            ),
    -203            exp.StrToTime: lambda self,
    -204            e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    -205            exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, {self.format_time(e)})",
    -206            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    -207            exp.Substring: rename_func("SUBSTR"),
    -208            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    -209            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    -210            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    -211            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -212            exp.Trim: trim_sql,
    -213            exp.UnixToTime: lambda self,
    -214            e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    -215        }
    -216
    -217        PROPERTIES_LOCATION = {
    -218            **generator.Generator.PROPERTIES_LOCATION,
    -219            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -220        }
    -221
    -222        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    -223            this = expression.this
    -224            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"
    -225
    -226        def offset_sql(self, expression: exp.Offset) -> str:
    -227            return f"{super().offset_sql(expression)} ROWS"
    -228
    -229        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    -230            this = self.sql(expression, "this")
    -231            passing = self.expressions(expression, key="passing")
    -232            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    -233            columns = self.expressions(expression, key="columns")
    -234            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    -235            by_ref = (
    -236                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    -237            )
    -238            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    -239
    -240        def add_column_sql(self, expression: exp.AlterTable) -> str:
    -241            actions = self.expressions(expression, key="actions", flat=True)
    -242            if len(expression.args.get("actions", [])) > 1:
    -243                return f"ADD ({actions})"
    -244            return f"ADD {actions}"
    +            
    160    class Generator(generator.Generator):
    +161        LOCKING_READS_SUPPORTED = True
    +162        JOIN_HINTS = False
    +163        TABLE_HINTS = False
    +164        COLUMN_JOIN_MARKS_SUPPORTED = True
    +165        DATA_TYPE_SPECIFIERS_ALLOWED = True
    +166        ALTER_TABLE_INCLUDE_COLUMN_KEYWORD = False
    +167        LIMIT_FETCH = "FETCH"
    +168        TABLESAMPLE_KEYWORDS = "SAMPLE"
    +169        LAST_DAY_SUPPORTS_DATE_PART = False
    +170        SUPPORTS_SELECT_INTO = True
    +171        TZ_TO_WITH_TIME_ZONE = True
    +172
    +173        TYPE_MAPPING = {
    +174            **generator.Generator.TYPE_MAPPING,
    +175            exp.DataType.Type.TINYINT: "NUMBER",
    +176            exp.DataType.Type.SMALLINT: "NUMBER",
    +177            exp.DataType.Type.INT: "NUMBER",
    +178            exp.DataType.Type.BIGINT: "NUMBER",
    +179            exp.DataType.Type.DECIMAL: "NUMBER",
    +180            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
    +181            exp.DataType.Type.VARCHAR: "VARCHAR2",
    +182            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
    +183            exp.DataType.Type.NCHAR: "NCHAR",
    +184            exp.DataType.Type.TEXT: "CLOB",
    +185            exp.DataType.Type.TIMETZ: "TIME",
    +186            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +187            exp.DataType.Type.BINARY: "BLOB",
    +188            exp.DataType.Type.VARBINARY: "BLOB",
    +189        }
    +190
    +191        TRANSFORMS = {
    +192            **generator.Generator.TRANSFORMS,
    +193            exp.DateStrToDate: lambda self, e: self.func(
    +194                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
    +195            ),
    +196            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +197            exp.ILike: no_ilike_sql,
    +198            exp.Select: transforms.preprocess(
    +199                [
    +200                    transforms.eliminate_distinct_on,
    +201                    transforms.eliminate_qualify,
    +202                ]
    +203            ),
    +204            exp.StrToTime: lambda self,
    +205            e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
    +206            exp.StrToDate: lambda self, e: f"TO_DATE({self.sql(e, 'this')}, {self.format_time(e)})",
    +207            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
    +208            exp.Substring: rename_func("SUBSTR"),
    +209            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
    +210            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
    +211            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
    +212            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +213            exp.Trim: trim_sql,
    +214            exp.UnixToTime: lambda self,
    +215            e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
    +216        }
    +217
    +218        PROPERTIES_LOCATION = {
    +219            **generator.Generator.PROPERTIES_LOCATION,
    +220            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +221        }
    +222
    +223        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    +224            this = expression.this
    +225            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"
    +226
    +227        def offset_sql(self, expression: exp.Offset) -> str:
    +228            return f"{super().offset_sql(expression)} ROWS"
    +229
    +230        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    +231            this = self.sql(expression, "this")
    +232            passing = self.expressions(expression, key="passing")
    +233            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    +234            columns = self.expressions(expression, key="columns")
    +235            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    +236            by_ref = (
    +237                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    +238            )
    +239            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    +240
    +241        def add_column_sql(self, expression: exp.AlterTable) -> str:
    +242            actions = self.expressions(expression, key="actions", flat=True)
    +243            if len(expression.args.get("actions", [])) > 1:
    +244                return f"ADD ({actions})"
    +245            return f"ADD {actions}"
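A minimal usage sketch of the generator settings listed above (TYPE_MAPPING, LIMIT_FETCH = "FETCH"); the printed SQL is an expectation based on those settings, not output captured from the test suite:

import sqlglot

# TEXT should map to CLOB, and LIMIT should be rendered via FETCH in Oracle output.
print(sqlglot.transpile("SELECT CAST(x AS TEXT) FROM t LIMIT 5", write="oracle")[0])
# expected, roughly: SELECT CAST(x AS CLOB) FROM t FETCH FIRST 5 ROWS ONLY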
     
    @@ -1759,9 +1777,9 @@ Default: True
    -
    222        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    -223            this = expression.this
    -224            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"
    +            
    223        def currenttimestamp_sql(self, expression: exp.CurrentTimestamp) -> str:
    +224            this = expression.this
    +225            return self.func("CURRENT_TIMESTAMP", this) if this else "CURRENT_TIMESTAMP"
     
    @@ -1779,8 +1797,8 @@ Default: True
    -
    226        def offset_sql(self, expression: exp.Offset) -> str:
    -227            return f"{super().offset_sql(expression)} ROWS"
    +            
    227        def offset_sql(self, expression: exp.Offset) -> str:
    +228            return f"{super().offset_sql(expression)} ROWS"
     
    @@ -1798,16 +1816,16 @@ Default: True
    -
    229        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    -230            this = self.sql(expression, "this")
    -231            passing = self.expressions(expression, key="passing")
    -232            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    -233            columns = self.expressions(expression, key="columns")
    -234            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    -235            by_ref = (
    -236                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    -237            )
    -238            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
    +            
    230        def xmltable_sql(self, expression: exp.XMLTable) -> str:
    +231            this = self.sql(expression, "this")
    +232            passing = self.expressions(expression, key="passing")
    +233            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
    +234            columns = self.expressions(expression, key="columns")
    +235            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
    +236            by_ref = (
    +237                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
    +238            )
    +239            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
     
    @@ -1825,11 +1843,11 @@ Default: True
    -
    240        def add_column_sql(self, expression: exp.AlterTable) -> str:
    -241            actions = self.expressions(expression, key="actions", flat=True)
    -242            if len(expression.args.get("actions", [])) > 1:
    -243                return f"ADD ({actions})"
    -244            return f"ADD {actions}"
    +            
    241        def add_column_sql(self, expression: exp.AlterTable) -> str:
    +242            actions = self.expressions(expression, key="actions", flat=True)
    +243            if len(expression.args.get("actions", [])) > 1:
    +244                return f"ADD ({actions})"
    +245            return f"ADD {actions}"
     
    @@ -2234,25 +2252,25 @@ Default: True
    -
    246    class Tokenizer(tokens.Tokenizer):
    -247        VAR_SINGLE_TOKENS = {"@", "$", "#"}
    -248
    -249        KEYWORDS = {
    -250            **tokens.Tokenizer.KEYWORDS,
    -251            "(+)": TokenType.JOIN_MARKER,
    -252            "BINARY_DOUBLE": TokenType.DOUBLE,
    -253            "BINARY_FLOAT": TokenType.FLOAT,
    -254            "COLUMNS": TokenType.COLUMN,
    -255            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    -256            "MINUS": TokenType.EXCEPT,
    -257            "NVARCHAR2": TokenType.NVARCHAR,
    -258            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
    -259            "SAMPLE": TokenType.TABLE_SAMPLE,
    -260            "START": TokenType.BEGIN,
    -261            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    -262            "TOP": TokenType.TOP,
    -263            "VARCHAR2": TokenType.VARCHAR,
    -264        }
    +            
    247    class Tokenizer(tokens.Tokenizer):
    +248        VAR_SINGLE_TOKENS = {"@", "$", "#"}
    +249
    +250        KEYWORDS = {
    +251            **tokens.Tokenizer.KEYWORDS,
    +252            "(+)": TokenType.JOIN_MARKER,
    +253            "BINARY_DOUBLE": TokenType.DOUBLE,
    +254            "BINARY_FLOAT": TokenType.FLOAT,
    +255            "COLUMNS": TokenType.COLUMN,
    +256            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
    +257            "MINUS": TokenType.EXCEPT,
    +258            "NVARCHAR2": TokenType.NVARCHAR,
    +259            "ORDER SIBLINGS BY": TokenType.ORDER_SIBLINGS_BY,
    +260            "SAMPLE": TokenType.TABLE_SAMPLE,
    +261            "START": TokenType.BEGIN,
    +262            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
    +263            "TOP": TokenType.TOP,
    +264            "VARCHAR2": TokenType.VARCHAR,
    +265        }
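A minimal sketch of the keyword remapping in the tokenizer above (for example, MINUS tokenizing as EXCEPT); the target dialect and the exact output are illustrative assumptions:

import sqlglot

# MINUS is Oracle's spelling of EXCEPT; reading with the Oracle dialect should
# normalise it when generating SQL for another dialect.
print(sqlglot.transpile("SELECT 1 MINUS SELECT 2", read="oracle", write="duckdb")[0])
# expected, roughly: SELECT 1 EXCEPT SELECT 2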
     
    @@ -2261,7 +2279,7 @@ Default: True
    VAR_SINGLE_TOKENS = -{'@', '#', '$'} +{'#', '@', '$'}
    diff --git a/docs/sqlglot/dialects/postgres.html b/docs/sqlglot/dialects/postgres.html index 234dab8..c197dd5 100644 --- a/docs/sqlglot/dialects/postgres.html +++ b/docs/sqlglot/dialects/postgres.html @@ -2026,6 +2026,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -2411,7 +2412,7 @@ Default: True diff --git a/docs/sqlglot/dialects/presto.html b/docs/sqlglot/dialects/presto.html index bf2d6db..9243539 100644 --- a/docs/sqlglot/dialects/presto.html +++ b/docs/sqlglot/dialects/presto.html @@ -75,6 +75,9 @@
  • Presto.Parser
      +
    • + VALUES_FOLLOWED_BY_PAREN +
    • FUNCTIONS
    • @@ -481,293 +484,295 @@ 225 } 226 227 class Parser(parser.Parser): -228 FUNCTIONS = { -229 **parser.Parser.FUNCTIONS, -230 "ARBITRARY": exp.AnyValue.from_arg_list, -231 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, -232 "APPROX_PERCENTILE": _approx_percentile, -233 "BITWISE_AND": binary_from_function(exp.BitwiseAnd), -234 "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)), -235 "BITWISE_OR": binary_from_function(exp.BitwiseOr), -236 "BITWISE_XOR": binary_from_function(exp.BitwiseXor), -237 "CARDINALITY": exp.ArraySize.from_arg_list, -238 "CONTAINS": exp.ArrayContains.from_arg_list, -239 "DATE_ADD": lambda args: exp.DateAdd( -240 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) -241 ), -242 "DATE_DIFF": lambda args: exp.DateDiff( -243 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) -244 ), -245 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), -246 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), -247 "DATE_TRUNC": date_trunc_to_time, -248 "ELEMENT_AT": lambda args: exp.Bracket( -249 this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True -250 ), -251 "FROM_HEX": exp.Unhex.from_arg_list, -252 "FROM_UNIXTIME": _from_unixtime, -253 "FROM_UTF8": lambda args: exp.Decode( -254 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") -255 ), -256 "NOW": exp.CurrentTimestamp.from_arg_list, -257 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -258 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) -259 ), -260 "REGEXP_REPLACE": lambda args: exp.RegexpReplace( -261 this=seq_get(args, 0), -262 expression=seq_get(args, 1), -263 replacement=seq_get(args, 2) or exp.Literal.string(""), -264 ), -265 "ROW": exp.Struct.from_arg_list, -266 "SEQUENCE": exp.GenerateSeries.from_arg_list, -267 "SET_AGG": exp.ArrayUniqueAgg.from_arg_list, -268 "SPLIT_TO_MAP": exp.StrToMap.from_arg_list, -269 "STRPOS": lambda args: exp.StrPosition( -270 this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) -271 ), -272 "TO_CHAR": _parse_to_char, -273 "TO_HEX": exp.Hex.from_arg_list, -274 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, -275 "TO_UTF8": lambda args: exp.Encode( -276 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") -277 ), -278 } -279 -280 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() -281 FUNCTION_PARSERS.pop("TRIM") -282 -283 class Generator(generator.Generator): -284 INTERVAL_ALLOWS_PLURAL_FORM = False -285 JOIN_HINTS = False -286 TABLE_HINTS = False -287 QUERY_HINTS = False -288 IS_BOOL_ALLOWED = False -289 TZ_TO_WITH_TIME_ZONE = True -290 NVL2_SUPPORTED = False -291 STRUCT_DELIMITER = ("(", ")") -292 LIMIT_ONLY_LITERALS = True -293 SUPPORTS_SINGLE_ARG_CONCAT = False -294 LIKE_PROPERTY_INSIDE_SCHEMA = True -295 MULTI_ARG_DISTINCT = False -296 -297 PROPERTIES_LOCATION = { -298 **generator.Generator.PROPERTIES_LOCATION, -299 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, -300 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -301 } -302 -303 TYPE_MAPPING = { -304 **generator.Generator.TYPE_MAPPING, -305 exp.DataType.Type.INT: "INTEGER", -306 exp.DataType.Type.FLOAT: "REAL", -307 exp.DataType.Type.BINARY: "VARBINARY", -308 exp.DataType.Type.TEXT: "VARCHAR", -309 exp.DataType.Type.TIMETZ: "TIME", -310 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -311 exp.DataType.Type.STRUCT: "ROW", -312 exp.DataType.Type.DATETIME: "TIMESTAMP", -313 exp.DataType.Type.DATETIME64: "TIMESTAMP", -314 } -315 -316 
TRANSFORMS = { -317 **generator.Generator.TRANSFORMS, -318 exp.AnyValue: rename_func("ARBITRARY"), -319 exp.ApproxDistinct: _approx_distinct_sql, -320 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), -321 exp.ArgMax: rename_func("MAX_BY"), -322 exp.ArgMin: rename_func("MIN_BY"), -323 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", -324 exp.ArrayConcat: rename_func("CONCAT"), -325 exp.ArrayContains: rename_func("CONTAINS"), -326 exp.ArraySize: rename_func("CARDINALITY"), -327 exp.ArrayUniqueAgg: rename_func("SET_AGG"), -328 exp.AtTimeZone: rename_func("AT_TIMEZONE"), -329 exp.BitwiseAnd: lambda self, -330 e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -331 exp.BitwiseLeftShift: lambda self, -332 e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -333 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", -334 exp.BitwiseOr: lambda self, -335 e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -336 exp.BitwiseRightShift: lambda self, -337 e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -338 exp.BitwiseXor: lambda self, -339 e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -340 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), -341 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -342 exp.DateAdd: lambda self, e: self.func( -343 "DATE_ADD", -344 exp.Literal.string(e.text("unit") or "DAY"), -345 _to_int( -346 e.expression, -347 ), -348 e.this, -349 ), -350 exp.DateDiff: lambda self, e: self.func( -351 "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this -352 ), -353 exp.DateStrToDate: datestrtodate_sql, -354 exp.DateToDi: lambda self, -355 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", -356 exp.DateSub: lambda self, e: self.func( -357 "DATE_ADD", -358 exp.Literal.string(e.text("unit") or "DAY"), -359 _to_int(e.expression * -1), -360 e.this, -361 ), -362 exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"), -363 exp.DiToDate: lambda self, -364 e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", -365 exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"), -366 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", -367 exp.First: _first_last_sql, -368 exp.FirstValue: _first_last_sql, -369 exp.FromTimeZone: lambda self, -370 e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'", -371 exp.Group: transforms.preprocess([transforms.unalias_group]), -372 exp.GroupConcat: lambda self, e: self.func( -373 "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator") -374 ), -375 exp.Hex: rename_func("TO_HEX"), -376 exp.If: if_sql(), -377 exp.ILike: no_ilike_sql, -378 exp.Initcap: _initcap_sql, -379 exp.ParseJSON: rename_func("JSON_PARSE"), -380 exp.Last: _first_last_sql, -381 exp.LastValue: _first_last_sql, -382 exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this), -383 exp.Lateral: _explode_to_unnest_sql, -384 exp.Left: left_to_substring_sql, -385 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), -386 exp.LogicalAnd: rename_func("BOOL_AND"), -387 exp.LogicalOr: rename_func("BOOL_OR"), -388 exp.Pivot: no_pivot_sql, -389 exp.Quantile: _quantile_sql, -390 exp.RegexpExtract: regexp_extract_sql, -391 exp.Right: right_to_substring_sql, -392 exp.SafeDivide: no_safe_divide_sql, -393 exp.Schema: _schema_sql, 
-394 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), -395 exp.Select: transforms.preprocess( -396 [ -397 transforms.eliminate_qualify, -398 transforms.eliminate_distinct_on, -399 transforms.explode_to_unnest(1), -400 transforms.eliminate_semi_and_anti_joins, -401 ] -402 ), -403 exp.SortArray: _no_sort_array, -404 exp.StrPosition: rename_func("STRPOS"), -405 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", -406 exp.StrToMap: rename_func("SPLIT_TO_MAP"), -407 exp.StrToTime: _str_to_time_sql, -408 exp.StrToUnix: lambda self, -409 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))", -410 exp.StructExtract: struct_extract_sql, -411 exp.Table: transforms.preprocess([_unnest_sequence]), -412 exp.Timestamp: no_timestamp_sql, -413 exp.TimestampTrunc: timestamptrunc_sql, -414 exp.TimeStrToDate: timestrtotime_sql, -415 exp.TimeStrToTime: timestrtotime_sql, -416 exp.TimeStrToUnix: lambda self, -417 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", -418 exp.TimeToStr: lambda self, -419 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", -420 exp.TimeToUnix: rename_func("TO_UNIXTIME"), -421 exp.ToChar: lambda self, -422 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", -423 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), -424 exp.TsOrDiToDi: lambda self, -425 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", -426 exp.TsOrDsAdd: _ts_or_ds_add_sql, -427 exp.TsOrDsDiff: _ts_or_ds_diff_sql, -428 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, -429 exp.Unhex: rename_func("FROM_HEX"), -430 exp.UnixToStr: lambda self, -431 e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", -432 exp.UnixToTime: _unix_to_time_sql, -433 exp.UnixToTimeStr: lambda self, -434 e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", -435 exp.VariancePop: rename_func("VAR_POP"), -436 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), -437 exp.WithinGroup: transforms.preprocess( -438 [transforms.remove_within_group_for_percentiles] -439 ), -440 exp.Xor: bool_xor_sql, -441 } -442 -443 def bracket_sql(self, expression: exp.Bracket) -> str: -444 if expression.args.get("safe"): -445 return self.func( -446 "ELEMENT_AT", -447 expression.this, -448 seq_get( -449 apply_index_offset( -450 expression.this, -451 expression.expressions, -452 1 - expression.args.get("offset", 0), -453 ), -454 0, -455 ), -456 ) -457 return super().bracket_sql(expression) -458 -459 def struct_sql(self, expression: exp.Struct) -> str: -460 if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions): -461 self.unsupported("Struct with key-value definitions is unsupported.") -462 return self.function_fallback_sql(expression) -463 -464 return rename_func("ROW")(self, expression) +228 VALUES_FOLLOWED_BY_PAREN = False +229 +230 FUNCTIONS = { +231 **parser.Parser.FUNCTIONS, +232 "ARBITRARY": exp.AnyValue.from_arg_list, +233 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, +234 "APPROX_PERCENTILE": _approx_percentile, +235 "BITWISE_AND": binary_from_function(exp.BitwiseAnd), +236 "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)), +237 "BITWISE_OR": binary_from_function(exp.BitwiseOr), +238 "BITWISE_XOR": binary_from_function(exp.BitwiseXor), +239 "CARDINALITY": exp.ArraySize.from_arg_list, +240 "CONTAINS": exp.ArrayContains.from_arg_list, +241 "DATE_ADD": lambda args: exp.DateAdd( +242 
this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +243 ), +244 "DATE_DIFF": lambda args: exp.DateDiff( +245 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +246 ), +247 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), +248 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), +249 "DATE_TRUNC": date_trunc_to_time, +250 "ELEMENT_AT": lambda args: exp.Bracket( +251 this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True +252 ), +253 "FROM_HEX": exp.Unhex.from_arg_list, +254 "FROM_UNIXTIME": _from_unixtime, +255 "FROM_UTF8": lambda args: exp.Decode( +256 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") +257 ), +258 "NOW": exp.CurrentTimestamp.from_arg_list, +259 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +260 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) +261 ), +262 "REGEXP_REPLACE": lambda args: exp.RegexpReplace( +263 this=seq_get(args, 0), +264 expression=seq_get(args, 1), +265 replacement=seq_get(args, 2) or exp.Literal.string(""), +266 ), +267 "ROW": exp.Struct.from_arg_list, +268 "SEQUENCE": exp.GenerateSeries.from_arg_list, +269 "SET_AGG": exp.ArrayUniqueAgg.from_arg_list, +270 "SPLIT_TO_MAP": exp.StrToMap.from_arg_list, +271 "STRPOS": lambda args: exp.StrPosition( +272 this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) +273 ), +274 "TO_CHAR": _parse_to_char, +275 "TO_HEX": exp.Hex.from_arg_list, +276 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, +277 "TO_UTF8": lambda args: exp.Encode( +278 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") +279 ), +280 } +281 +282 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() +283 FUNCTION_PARSERS.pop("TRIM") +284 +285 class Generator(generator.Generator): +286 INTERVAL_ALLOWS_PLURAL_FORM = False +287 JOIN_HINTS = False +288 TABLE_HINTS = False +289 QUERY_HINTS = False +290 IS_BOOL_ALLOWED = False +291 TZ_TO_WITH_TIME_ZONE = True +292 NVL2_SUPPORTED = False +293 STRUCT_DELIMITER = ("(", ")") +294 LIMIT_ONLY_LITERALS = True +295 SUPPORTS_SINGLE_ARG_CONCAT = False +296 LIKE_PROPERTY_INSIDE_SCHEMA = True +297 MULTI_ARG_DISTINCT = False +298 +299 PROPERTIES_LOCATION = { +300 **generator.Generator.PROPERTIES_LOCATION, +301 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, +302 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +303 } +304 +305 TYPE_MAPPING = { +306 **generator.Generator.TYPE_MAPPING, +307 exp.DataType.Type.INT: "INTEGER", +308 exp.DataType.Type.FLOAT: "REAL", +309 exp.DataType.Type.BINARY: "VARBINARY", +310 exp.DataType.Type.TEXT: "VARCHAR", +311 exp.DataType.Type.TIMETZ: "TIME", +312 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +313 exp.DataType.Type.STRUCT: "ROW", +314 exp.DataType.Type.DATETIME: "TIMESTAMP", +315 exp.DataType.Type.DATETIME64: "TIMESTAMP", +316 } +317 +318 TRANSFORMS = { +319 **generator.Generator.TRANSFORMS, +320 exp.AnyValue: rename_func("ARBITRARY"), +321 exp.ApproxDistinct: _approx_distinct_sql, +322 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), +323 exp.ArgMax: rename_func("MAX_BY"), +324 exp.ArgMin: rename_func("MIN_BY"), +325 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", +326 exp.ArrayConcat: rename_func("CONCAT"), +327 exp.ArrayContains: rename_func("CONTAINS"), +328 exp.ArraySize: rename_func("CARDINALITY"), +329 exp.ArrayUniqueAgg: rename_func("SET_AGG"), +330 exp.AtTimeZone: rename_func("AT_TIMEZONE"), +331 exp.BitwiseAnd: lambda self, +332 e: 
f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +333 exp.BitwiseLeftShift: lambda self, +334 e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +335 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", +336 exp.BitwiseOr: lambda self, +337 e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +338 exp.BitwiseRightShift: lambda self, +339 e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +340 exp.BitwiseXor: lambda self, +341 e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +342 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), +343 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +344 exp.DateAdd: lambda self, e: self.func( +345 "DATE_ADD", +346 exp.Literal.string(e.text("unit") or "DAY"), +347 _to_int( +348 e.expression, +349 ), +350 e.this, +351 ), +352 exp.DateDiff: lambda self, e: self.func( +353 "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this +354 ), +355 exp.DateStrToDate: datestrtodate_sql, +356 exp.DateToDi: lambda self, +357 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", +358 exp.DateSub: lambda self, e: self.func( +359 "DATE_ADD", +360 exp.Literal.string(e.text("unit") or "DAY"), +361 _to_int(e.expression * -1), +362 e.this, +363 ), +364 exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"), +365 exp.DiToDate: lambda self, +366 e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", +367 exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"), +368 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", +369 exp.First: _first_last_sql, +370 exp.FirstValue: _first_last_sql, +371 exp.FromTimeZone: lambda self, +372 e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'", +373 exp.Group: transforms.preprocess([transforms.unalias_group]), +374 exp.GroupConcat: lambda self, e: self.func( +375 "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator") +376 ), +377 exp.Hex: rename_func("TO_HEX"), +378 exp.If: if_sql(), +379 exp.ILike: no_ilike_sql, +380 exp.Initcap: _initcap_sql, +381 exp.ParseJSON: rename_func("JSON_PARSE"), +382 exp.Last: _first_last_sql, +383 exp.LastValue: _first_last_sql, +384 exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this), +385 exp.Lateral: _explode_to_unnest_sql, +386 exp.Left: left_to_substring_sql, +387 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), +388 exp.LogicalAnd: rename_func("BOOL_AND"), +389 exp.LogicalOr: rename_func("BOOL_OR"), +390 exp.Pivot: no_pivot_sql, +391 exp.Quantile: _quantile_sql, +392 exp.RegexpExtract: regexp_extract_sql, +393 exp.Right: right_to_substring_sql, +394 exp.SafeDivide: no_safe_divide_sql, +395 exp.Schema: _schema_sql, +396 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), +397 exp.Select: transforms.preprocess( +398 [ +399 transforms.eliminate_qualify, +400 transforms.eliminate_distinct_on, +401 transforms.explode_to_unnest(1), +402 transforms.eliminate_semi_and_anti_joins, +403 ] +404 ), +405 exp.SortArray: _no_sort_array, +406 exp.StrPosition: rename_func("STRPOS"), +407 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", +408 exp.StrToMap: rename_func("SPLIT_TO_MAP"), +409 exp.StrToTime: _str_to_time_sql, +410 exp.StrToUnix: lambda self, +411 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, 
{self.format_time(e)}))", +412 exp.StructExtract: struct_extract_sql, +413 exp.Table: transforms.preprocess([_unnest_sequence]), +414 exp.Timestamp: no_timestamp_sql, +415 exp.TimestampTrunc: timestamptrunc_sql, +416 exp.TimeStrToDate: timestrtotime_sql, +417 exp.TimeStrToTime: timestrtotime_sql, +418 exp.TimeStrToUnix: lambda self, +419 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", +420 exp.TimeToStr: lambda self, +421 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", +422 exp.TimeToUnix: rename_func("TO_UNIXTIME"), +423 exp.ToChar: lambda self, +424 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", +425 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), +426 exp.TsOrDiToDi: lambda self, +427 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", +428 exp.TsOrDsAdd: _ts_or_ds_add_sql, +429 exp.TsOrDsDiff: _ts_or_ds_diff_sql, +430 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, +431 exp.Unhex: rename_func("FROM_HEX"), +432 exp.UnixToStr: lambda self, +433 e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", +434 exp.UnixToTime: _unix_to_time_sql, +435 exp.UnixToTimeStr: lambda self, +436 e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", +437 exp.VariancePop: rename_func("VAR_POP"), +438 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), +439 exp.WithinGroup: transforms.preprocess( +440 [transforms.remove_within_group_for_percentiles] +441 ), +442 exp.Xor: bool_xor_sql, +443 } +444 +445 def bracket_sql(self, expression: exp.Bracket) -> str: +446 if expression.args.get("safe"): +447 return self.func( +448 "ELEMENT_AT", +449 expression.this, +450 seq_get( +451 apply_index_offset( +452 expression.this, +453 expression.expressions, +454 1 - expression.args.get("offset", 0), +455 ), +456 0, +457 ), +458 ) +459 return super().bracket_sql(expression) +460 +461 def struct_sql(self, expression: exp.Struct) -> str: +462 if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions): +463 self.unsupported("Struct with key-value definitions is unsupported.") +464 return self.function_fallback_sql(expression) 465 -466 def interval_sql(self, expression: exp.Interval) -> str: -467 unit = self.sql(expression, "unit") -468 if expression.this and unit.startswith("WEEK"): -469 return f"({expression.this.name} * INTERVAL '7' DAY)" -470 return super().interval_sql(expression) -471 -472 def transaction_sql(self, expression: exp.Transaction) -> str: -473 modes = expression.args.get("modes") -474 modes = f" {', '.join(modes)}" if modes else "" -475 return f"START TRANSACTION{modes}" -476 -477 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: -478 start = expression.args["start"] -479 end = expression.args["end"] -480 step = expression.args.get("step") -481 -482 if isinstance(start, exp.Cast): -483 target_type = start.to -484 elif isinstance(end, exp.Cast): -485 target_type = end.to -486 else: -487 target_type = None -488 -489 if target_type and target_type.is_type("timestamp"): -490 if target_type is start.to: -491 end = exp.cast(end, target_type) -492 else: -493 start = exp.cast(start, target_type) -494 -495 return self.func("SEQUENCE", start, end, step) +466 return rename_func("ROW")(self, expression) +467 +468 def interval_sql(self, expression: exp.Interval) -> str: +469 unit = self.sql(expression, "unit") +470 if expression.this and unit.startswith("WEEK"): +471 return f"({expression.this.name} * 
INTERVAL '7' DAY)" +472 return super().interval_sql(expression) +473 +474 def transaction_sql(self, expression: exp.Transaction) -> str: +475 modes = expression.args.get("modes") +476 modes = f" {', '.join(modes)}" if modes else "" +477 return f"START TRANSACTION{modes}" +478 +479 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: +480 start = expression.args["start"] +481 end = expression.args["end"] +482 step = expression.args.get("step") +483 +484 if isinstance(start, exp.Cast): +485 target_type = start.to +486 elif isinstance(end, exp.Cast): +487 target_type = end.to +488 else: +489 target_type = None +490 +491 if target_type and target_type.is_type("timestamp"): +492 if target_type is start.to: +493 end = exp.cast(end, target_type) +494 else: +495 start = exp.cast(start, target_type) 496 -497 def offset_limit_modifiers( -498 self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] -499 ) -> t.List[str]: -500 return [ -501 self.sql(expression, "offset"), -502 self.sql(limit), -503 ] -504 -505 def create_sql(self, expression: exp.Create) -> str: -506 """ -507 Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression), -508 so we need to remove them -509 """ -510 kind = expression.args["kind"] -511 schema = expression.this -512 if kind == "VIEW" and schema.expressions: -513 expression.this.set("expressions", None) -514 return super().create_sql(expression) +497 return self.func("SEQUENCE", start, end, step) +498 +499 def offset_limit_modifiers( +500 self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] +501 ) -> t.List[str]: +502 return [ +503 self.sql(expression, "offset"), +504 self.sql(limit), +505 ] +506 +507 def create_sql(self, expression: exp.Create) -> str: +508 """ +509 Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression), +510 so we need to remove them +511 """ +512 kind = expression.args["kind"] +513 schema = expression.this +514 if kind == "VIEW" and schema.expressions: +515 expression.this.set("expressions", None) +516 return super().create_sql(expression)
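A minimal sketch of the interval_sql override shown in the listing above, which rewrites WEEK intervals as a multiple of seven days; parsing the input with the default dialect and the exact rendering are assumptions:

import sqlglot

# Presto has no WEEK interval unit, so the generator multiplies by INTERVAL '7' DAY.
print(sqlglot.transpile("SELECT INTERVAL '2' WEEK", write="presto")[0])
# expected, roughly: SELECT (2 * INTERVAL '7' DAY)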
  • @@ -815,293 +820,295 @@ 226 } 227 228 class Parser(parser.Parser): -229 FUNCTIONS = { -230 **parser.Parser.FUNCTIONS, -231 "ARBITRARY": exp.AnyValue.from_arg_list, -232 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, -233 "APPROX_PERCENTILE": _approx_percentile, -234 "BITWISE_AND": binary_from_function(exp.BitwiseAnd), -235 "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)), -236 "BITWISE_OR": binary_from_function(exp.BitwiseOr), -237 "BITWISE_XOR": binary_from_function(exp.BitwiseXor), -238 "CARDINALITY": exp.ArraySize.from_arg_list, -239 "CONTAINS": exp.ArrayContains.from_arg_list, -240 "DATE_ADD": lambda args: exp.DateAdd( -241 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) -242 ), -243 "DATE_DIFF": lambda args: exp.DateDiff( -244 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) -245 ), -246 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), -247 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), -248 "DATE_TRUNC": date_trunc_to_time, -249 "ELEMENT_AT": lambda args: exp.Bracket( -250 this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True -251 ), -252 "FROM_HEX": exp.Unhex.from_arg_list, -253 "FROM_UNIXTIME": _from_unixtime, -254 "FROM_UTF8": lambda args: exp.Decode( -255 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") -256 ), -257 "NOW": exp.CurrentTimestamp.from_arg_list, -258 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( -259 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) -260 ), -261 "REGEXP_REPLACE": lambda args: exp.RegexpReplace( -262 this=seq_get(args, 0), -263 expression=seq_get(args, 1), -264 replacement=seq_get(args, 2) or exp.Literal.string(""), -265 ), -266 "ROW": exp.Struct.from_arg_list, -267 "SEQUENCE": exp.GenerateSeries.from_arg_list, -268 "SET_AGG": exp.ArrayUniqueAgg.from_arg_list, -269 "SPLIT_TO_MAP": exp.StrToMap.from_arg_list, -270 "STRPOS": lambda args: exp.StrPosition( -271 this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) -272 ), -273 "TO_CHAR": _parse_to_char, -274 "TO_HEX": exp.Hex.from_arg_list, -275 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, -276 "TO_UTF8": lambda args: exp.Encode( -277 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") -278 ), -279 } -280 -281 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() -282 FUNCTION_PARSERS.pop("TRIM") -283 -284 class Generator(generator.Generator): -285 INTERVAL_ALLOWS_PLURAL_FORM = False -286 JOIN_HINTS = False -287 TABLE_HINTS = False -288 QUERY_HINTS = False -289 IS_BOOL_ALLOWED = False -290 TZ_TO_WITH_TIME_ZONE = True -291 NVL2_SUPPORTED = False -292 STRUCT_DELIMITER = ("(", ")") -293 LIMIT_ONLY_LITERALS = True -294 SUPPORTS_SINGLE_ARG_CONCAT = False -295 LIKE_PROPERTY_INSIDE_SCHEMA = True -296 MULTI_ARG_DISTINCT = False -297 -298 PROPERTIES_LOCATION = { -299 **generator.Generator.PROPERTIES_LOCATION, -300 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, -301 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, -302 } -303 -304 TYPE_MAPPING = { -305 **generator.Generator.TYPE_MAPPING, -306 exp.DataType.Type.INT: "INTEGER", -307 exp.DataType.Type.FLOAT: "REAL", -308 exp.DataType.Type.BINARY: "VARBINARY", -309 exp.DataType.Type.TEXT: "VARCHAR", -310 exp.DataType.Type.TIMETZ: "TIME", -311 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -312 exp.DataType.Type.STRUCT: "ROW", -313 exp.DataType.Type.DATETIME: "TIMESTAMP", -314 exp.DataType.Type.DATETIME64: "TIMESTAMP", -315 } -316 -317 
TRANSFORMS = { -318 **generator.Generator.TRANSFORMS, -319 exp.AnyValue: rename_func("ARBITRARY"), -320 exp.ApproxDistinct: _approx_distinct_sql, -321 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), -322 exp.ArgMax: rename_func("MAX_BY"), -323 exp.ArgMin: rename_func("MIN_BY"), -324 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", -325 exp.ArrayConcat: rename_func("CONCAT"), -326 exp.ArrayContains: rename_func("CONTAINS"), -327 exp.ArraySize: rename_func("CARDINALITY"), -328 exp.ArrayUniqueAgg: rename_func("SET_AGG"), -329 exp.AtTimeZone: rename_func("AT_TIMEZONE"), -330 exp.BitwiseAnd: lambda self, -331 e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -332 exp.BitwiseLeftShift: lambda self, -333 e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -334 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", -335 exp.BitwiseOr: lambda self, -336 e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -337 exp.BitwiseRightShift: lambda self, -338 e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -339 exp.BitwiseXor: lambda self, -340 e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", -341 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), -342 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", -343 exp.DateAdd: lambda self, e: self.func( -344 "DATE_ADD", -345 exp.Literal.string(e.text("unit") or "DAY"), -346 _to_int( -347 e.expression, -348 ), -349 e.this, -350 ), -351 exp.DateDiff: lambda self, e: self.func( -352 "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this -353 ), -354 exp.DateStrToDate: datestrtodate_sql, -355 exp.DateToDi: lambda self, -356 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", -357 exp.DateSub: lambda self, e: self.func( -358 "DATE_ADD", -359 exp.Literal.string(e.text("unit") or "DAY"), -360 _to_int(e.expression * -1), -361 e.this, -362 ), -363 exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"), -364 exp.DiToDate: lambda self, -365 e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", -366 exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"), -367 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", -368 exp.First: _first_last_sql, -369 exp.FirstValue: _first_last_sql, -370 exp.FromTimeZone: lambda self, -371 e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'", -372 exp.Group: transforms.preprocess([transforms.unalias_group]), -373 exp.GroupConcat: lambda self, e: self.func( -374 "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator") -375 ), -376 exp.Hex: rename_func("TO_HEX"), -377 exp.If: if_sql(), -378 exp.ILike: no_ilike_sql, -379 exp.Initcap: _initcap_sql, -380 exp.ParseJSON: rename_func("JSON_PARSE"), -381 exp.Last: _first_last_sql, -382 exp.LastValue: _first_last_sql, -383 exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this), -384 exp.Lateral: _explode_to_unnest_sql, -385 exp.Left: left_to_substring_sql, -386 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), -387 exp.LogicalAnd: rename_func("BOOL_AND"), -388 exp.LogicalOr: rename_func("BOOL_OR"), -389 exp.Pivot: no_pivot_sql, -390 exp.Quantile: _quantile_sql, -391 exp.RegexpExtract: regexp_extract_sql, -392 exp.Right: right_to_substring_sql, -393 exp.SafeDivide: no_safe_divide_sql, -394 exp.Schema: _schema_sql, 
-395 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), -396 exp.Select: transforms.preprocess( -397 [ -398 transforms.eliminate_qualify, -399 transforms.eliminate_distinct_on, -400 transforms.explode_to_unnest(1), -401 transforms.eliminate_semi_and_anti_joins, -402 ] -403 ), -404 exp.SortArray: _no_sort_array, -405 exp.StrPosition: rename_func("STRPOS"), -406 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", -407 exp.StrToMap: rename_func("SPLIT_TO_MAP"), -408 exp.StrToTime: _str_to_time_sql, -409 exp.StrToUnix: lambda self, -410 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))", -411 exp.StructExtract: struct_extract_sql, -412 exp.Table: transforms.preprocess([_unnest_sequence]), -413 exp.Timestamp: no_timestamp_sql, -414 exp.TimestampTrunc: timestamptrunc_sql, -415 exp.TimeStrToDate: timestrtotime_sql, -416 exp.TimeStrToTime: timestrtotime_sql, -417 exp.TimeStrToUnix: lambda self, -418 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", -419 exp.TimeToStr: lambda self, -420 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", -421 exp.TimeToUnix: rename_func("TO_UNIXTIME"), -422 exp.ToChar: lambda self, -423 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", -424 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), -425 exp.TsOrDiToDi: lambda self, -426 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", -427 exp.TsOrDsAdd: _ts_or_ds_add_sql, -428 exp.TsOrDsDiff: _ts_or_ds_diff_sql, -429 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, -430 exp.Unhex: rename_func("FROM_HEX"), -431 exp.UnixToStr: lambda self, -432 e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", -433 exp.UnixToTime: _unix_to_time_sql, -434 exp.UnixToTimeStr: lambda self, -435 e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", -436 exp.VariancePop: rename_func("VAR_POP"), -437 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), -438 exp.WithinGroup: transforms.preprocess( -439 [transforms.remove_within_group_for_percentiles] -440 ), -441 exp.Xor: bool_xor_sql, -442 } -443 -444 def bracket_sql(self, expression: exp.Bracket) -> str: -445 if expression.args.get("safe"): -446 return self.func( -447 "ELEMENT_AT", -448 expression.this, -449 seq_get( -450 apply_index_offset( -451 expression.this, -452 expression.expressions, -453 1 - expression.args.get("offset", 0), -454 ), -455 0, -456 ), -457 ) -458 return super().bracket_sql(expression) -459 -460 def struct_sql(self, expression: exp.Struct) -> str: -461 if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions): -462 self.unsupported("Struct with key-value definitions is unsupported.") -463 return self.function_fallback_sql(expression) -464 -465 return rename_func("ROW")(self, expression) +229 VALUES_FOLLOWED_BY_PAREN = False +230 +231 FUNCTIONS = { +232 **parser.Parser.FUNCTIONS, +233 "ARBITRARY": exp.AnyValue.from_arg_list, +234 "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list, +235 "APPROX_PERCENTILE": _approx_percentile, +236 "BITWISE_AND": binary_from_function(exp.BitwiseAnd), +237 "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)), +238 "BITWISE_OR": binary_from_function(exp.BitwiseOr), +239 "BITWISE_XOR": binary_from_function(exp.BitwiseXor), +240 "CARDINALITY": exp.ArraySize.from_arg_list, +241 "CONTAINS": exp.ArrayContains.from_arg_list, +242 "DATE_ADD": lambda args: exp.DateAdd( +243 
this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +244 ), +245 "DATE_DIFF": lambda args: exp.DateDiff( +246 this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0) +247 ), +248 "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"), +249 "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"), +250 "DATE_TRUNC": date_trunc_to_time, +251 "ELEMENT_AT": lambda args: exp.Bracket( +252 this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True +253 ), +254 "FROM_HEX": exp.Unhex.from_arg_list, +255 "FROM_UNIXTIME": _from_unixtime, +256 "FROM_UTF8": lambda args: exp.Decode( +257 this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8") +258 ), +259 "NOW": exp.CurrentTimestamp.from_arg_list, +260 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( +261 this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2) +262 ), +263 "REGEXP_REPLACE": lambda args: exp.RegexpReplace( +264 this=seq_get(args, 0), +265 expression=seq_get(args, 1), +266 replacement=seq_get(args, 2) or exp.Literal.string(""), +267 ), +268 "ROW": exp.Struct.from_arg_list, +269 "SEQUENCE": exp.GenerateSeries.from_arg_list, +270 "SET_AGG": exp.ArrayUniqueAgg.from_arg_list, +271 "SPLIT_TO_MAP": exp.StrToMap.from_arg_list, +272 "STRPOS": lambda args: exp.StrPosition( +273 this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2) +274 ), +275 "TO_CHAR": _parse_to_char, +276 "TO_HEX": exp.Hex.from_arg_list, +277 "TO_UNIXTIME": exp.TimeToUnix.from_arg_list, +278 "TO_UTF8": lambda args: exp.Encode( +279 this=seq_get(args, 0), charset=exp.Literal.string("utf-8") +280 ), +281 } +282 +283 FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy() +284 FUNCTION_PARSERS.pop("TRIM") +285 +286 class Generator(generator.Generator): +287 INTERVAL_ALLOWS_PLURAL_FORM = False +288 JOIN_HINTS = False +289 TABLE_HINTS = False +290 QUERY_HINTS = False +291 IS_BOOL_ALLOWED = False +292 TZ_TO_WITH_TIME_ZONE = True +293 NVL2_SUPPORTED = False +294 STRUCT_DELIMITER = ("(", ")") +295 LIMIT_ONLY_LITERALS = True +296 SUPPORTS_SINGLE_ARG_CONCAT = False +297 LIKE_PROPERTY_INSIDE_SCHEMA = True +298 MULTI_ARG_DISTINCT = False +299 +300 PROPERTIES_LOCATION = { +301 **generator.Generator.PROPERTIES_LOCATION, +302 exp.LocationProperty: exp.Properties.Location.UNSUPPORTED, +303 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, +304 } +305 +306 TYPE_MAPPING = { +307 **generator.Generator.TYPE_MAPPING, +308 exp.DataType.Type.INT: "INTEGER", +309 exp.DataType.Type.FLOAT: "REAL", +310 exp.DataType.Type.BINARY: "VARBINARY", +311 exp.DataType.Type.TEXT: "VARCHAR", +312 exp.DataType.Type.TIMETZ: "TIME", +313 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +314 exp.DataType.Type.STRUCT: "ROW", +315 exp.DataType.Type.DATETIME: "TIMESTAMP", +316 exp.DataType.Type.DATETIME64: "TIMESTAMP", +317 } +318 +319 TRANSFORMS = { +320 **generator.Generator.TRANSFORMS, +321 exp.AnyValue: rename_func("ARBITRARY"), +322 exp.ApproxDistinct: _approx_distinct_sql, +323 exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"), +324 exp.ArgMax: rename_func("MAX_BY"), +325 exp.ArgMin: rename_func("MIN_BY"), +326 exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]", +327 exp.ArrayConcat: rename_func("CONCAT"), +328 exp.ArrayContains: rename_func("CONTAINS"), +329 exp.ArraySize: rename_func("CARDINALITY"), +330 exp.ArrayUniqueAgg: rename_func("SET_AGG"), +331 exp.AtTimeZone: rename_func("AT_TIMEZONE"), +332 exp.BitwiseAnd: lambda self, +333 e: 
f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +334 exp.BitwiseLeftShift: lambda self, +335 e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +336 exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})", +337 exp.BitwiseOr: lambda self, +338 e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +339 exp.BitwiseRightShift: lambda self, +340 e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +341 exp.BitwiseXor: lambda self, +342 e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})", +343 exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]), +344 exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP", +345 exp.DateAdd: lambda self, e: self.func( +346 "DATE_ADD", +347 exp.Literal.string(e.text("unit") or "DAY"), +348 _to_int( +349 e.expression, +350 ), +351 e.this, +352 ), +353 exp.DateDiff: lambda self, e: self.func( +354 "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this +355 ), +356 exp.DateStrToDate: datestrtodate_sql, +357 exp.DateToDi: lambda self, +358 e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)", +359 exp.DateSub: lambda self, e: self.func( +360 "DATE_ADD", +361 exp.Literal.string(e.text("unit") or "DAY"), +362 _to_int(e.expression * -1), +363 e.this, +364 ), +365 exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"), +366 exp.DiToDate: lambda self, +367 e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)", +368 exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"), +369 exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'", +370 exp.First: _first_last_sql, +371 exp.FirstValue: _first_last_sql, +372 exp.FromTimeZone: lambda self, +373 e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'", +374 exp.Group: transforms.preprocess([transforms.unalias_group]), +375 exp.GroupConcat: lambda self, e: self.func( +376 "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator") +377 ), +378 exp.Hex: rename_func("TO_HEX"), +379 exp.If: if_sql(), +380 exp.ILike: no_ilike_sql, +381 exp.Initcap: _initcap_sql, +382 exp.ParseJSON: rename_func("JSON_PARSE"), +383 exp.Last: _first_last_sql, +384 exp.LastValue: _first_last_sql, +385 exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this), +386 exp.Lateral: _explode_to_unnest_sql, +387 exp.Left: left_to_substring_sql, +388 exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"), +389 exp.LogicalAnd: rename_func("BOOL_AND"), +390 exp.LogicalOr: rename_func("BOOL_OR"), +391 exp.Pivot: no_pivot_sql, +392 exp.Quantile: _quantile_sql, +393 exp.RegexpExtract: regexp_extract_sql, +394 exp.Right: right_to_substring_sql, +395 exp.SafeDivide: no_safe_divide_sql, +396 exp.Schema: _schema_sql, +397 exp.SchemaCommentProperty: lambda self, e: self.naked_property(e), +398 exp.Select: transforms.preprocess( +399 [ +400 transforms.eliminate_qualify, +401 transforms.eliminate_distinct_on, +402 transforms.explode_to_unnest(1), +403 transforms.eliminate_semi_and_anti_joins, +404 ] +405 ), +406 exp.SortArray: _no_sort_array, +407 exp.StrPosition: rename_func("STRPOS"), +408 exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)", +409 exp.StrToMap: rename_func("SPLIT_TO_MAP"), +410 exp.StrToTime: _str_to_time_sql, +411 exp.StrToUnix: lambda self, +412 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, 
{self.format_time(e)}))", +413 exp.StructExtract: struct_extract_sql, +414 exp.Table: transforms.preprocess([_unnest_sequence]), +415 exp.Timestamp: no_timestamp_sql, +416 exp.TimestampTrunc: timestamptrunc_sql, +417 exp.TimeStrToDate: timestrtotime_sql, +418 exp.TimeStrToTime: timestrtotime_sql, +419 exp.TimeStrToUnix: lambda self, +420 e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))", +421 exp.TimeToStr: lambda self, +422 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", +423 exp.TimeToUnix: rename_func("TO_UNIXTIME"), +424 exp.ToChar: lambda self, +425 e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})", +426 exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]), +427 exp.TsOrDiToDi: lambda self, +428 e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)", +429 exp.TsOrDsAdd: _ts_or_ds_add_sql, +430 exp.TsOrDsDiff: _ts_or_ds_diff_sql, +431 exp.TsOrDsToDate: _ts_or_ds_to_date_sql, +432 exp.Unhex: rename_func("FROM_HEX"), +433 exp.UnixToStr: lambda self, +434 e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})", +435 exp.UnixToTime: _unix_to_time_sql, +436 exp.UnixToTimeStr: lambda self, +437 e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)", +438 exp.VariancePop: rename_func("VAR_POP"), +439 exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]), +440 exp.WithinGroup: transforms.preprocess( +441 [transforms.remove_within_group_for_percentiles] +442 ), +443 exp.Xor: bool_xor_sql, +444 } +445 +446 def bracket_sql(self, expression: exp.Bracket) -> str: +447 if expression.args.get("safe"): +448 return self.func( +449 "ELEMENT_AT", +450 expression.this, +451 seq_get( +452 apply_index_offset( +453 expression.this, +454 expression.expressions, +455 1 - expression.args.get("offset", 0), +456 ), +457 0, +458 ), +459 ) +460 return super().bracket_sql(expression) +461 +462 def struct_sql(self, expression: exp.Struct) -> str: +463 if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions): +464 self.unsupported("Struct with key-value definitions is unsupported.") +465 return self.function_fallback_sql(expression) 466 -467 def interval_sql(self, expression: exp.Interval) -> str: -468 unit = self.sql(expression, "unit") -469 if expression.this and unit.startswith("WEEK"): -470 return f"({expression.this.name} * INTERVAL '7' DAY)" -471 return super().interval_sql(expression) -472 -473 def transaction_sql(self, expression: exp.Transaction) -> str: -474 modes = expression.args.get("modes") -475 modes = f" {', '.join(modes)}" if modes else "" -476 return f"START TRANSACTION{modes}" -477 -478 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: -479 start = expression.args["start"] -480 end = expression.args["end"] -481 step = expression.args.get("step") -482 -483 if isinstance(start, exp.Cast): -484 target_type = start.to -485 elif isinstance(end, exp.Cast): -486 target_type = end.to -487 else: -488 target_type = None -489 -490 if target_type and target_type.is_type("timestamp"): -491 if target_type is start.to: -492 end = exp.cast(end, target_type) -493 else: -494 start = exp.cast(start, target_type) -495 -496 return self.func("SEQUENCE", start, end, step) +467 return rename_func("ROW")(self, expression) +468 +469 def interval_sql(self, expression: exp.Interval) -> str: +470 unit = self.sql(expression, "unit") +471 if expression.this and unit.startswith("WEEK"): +472 return f"({expression.this.name} * 
INTERVAL '7' DAY)" +473 return super().interval_sql(expression) +474 +475 def transaction_sql(self, expression: exp.Transaction) -> str: +476 modes = expression.args.get("modes") +477 modes = f" {', '.join(modes)}" if modes else "" +478 return f"START TRANSACTION{modes}" +479 +480 def generateseries_sql(self, expression: exp.GenerateSeries) -> str: +481 start = expression.args["start"] +482 end = expression.args["end"] +483 step = expression.args.get("step") +484 +485 if isinstance(start, exp.Cast): +486 target_type = start.to +487 elif isinstance(end, exp.Cast): +488 target_type = end.to +489 else: +490 target_type = None +491 +492 if target_type and target_type.is_type("timestamp"): +493 if target_type is start.to: +494 end = exp.cast(end, target_type) +495 else: +496 start = exp.cast(start, target_type) 497 -498 def offset_limit_modifiers( -499 self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] -500 ) -> t.List[str]: -501 return [ -502 self.sql(expression, "offset"), -503 self.sql(limit), -504 ] -505 -506 def create_sql(self, expression: exp.Create) -> str: -507 """ -508 Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression), -509 so we need to remove them -510 """ -511 kind = expression.args["kind"] -512 schema = expression.this -513 if kind == "VIEW" and schema.expressions: -514 expression.this.set("expressions", None) -515 return super().create_sql(expression) +498 return self.func("SEQUENCE", start, end, step) +499 +500 def offset_limit_modifiers( +501 self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit] +502 ) -> t.List[str]: +503 return [ +504 self.sql(expression, "offset"), +505 self.sql(limit), +506 ] +507 +508 def create_sql(self, expression: exp.Create) -> str: +509 """ +510 Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression), +511 so we need to remove them +512 """ +513 kind = expression.args["kind"] +514 schema = expression.this +515 if kind == "VIEW" and schema.expressions: +516 expression.this.set("expressions", None) +517 return super().create_sql(expression)
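Note on the hunk above: the TRANSFORMS table is what turns dialect-agnostic sqlglot expressions into Presto SQL; for example, bitwise operators become BITWISE_* calls because Presto has no infix bitwise syntax. A minimal sketch of how this surfaces through the public API (the sample query is illustrative, not taken from this patch):

    import sqlglot

    # The default dialect parses `x & y` into exp.BitwiseAnd; the Presto generator's
    # TRANSFORMS entry then renders it as a BITWISE_AND(...) function call.
    print(sqlglot.transpile("SELECT x & y FROM t", write="presto")[0])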
    @@ -1624,60 +1631,62 @@ True means a / b is integer division if both a and
    228    class Parser(parser.Parser):
    -229        FUNCTIONS = {
    -230            **parser.Parser.FUNCTIONS,
    -231            "ARBITRARY": exp.AnyValue.from_arg_list,
    -232            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    -233            "APPROX_PERCENTILE": _approx_percentile,
    -234            "BITWISE_AND": binary_from_function(exp.BitwiseAnd),
    -235            "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)),
    -236            "BITWISE_OR": binary_from_function(exp.BitwiseOr),
    -237            "BITWISE_XOR": binary_from_function(exp.BitwiseXor),
    -238            "CARDINALITY": exp.ArraySize.from_arg_list,
    -239            "CONTAINS": exp.ArrayContains.from_arg_list,
    -240            "DATE_ADD": lambda args: exp.DateAdd(
    -241                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    -242            ),
    -243            "DATE_DIFF": lambda args: exp.DateDiff(
    -244                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    -245            ),
    -246            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    -247            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    -248            "DATE_TRUNC": date_trunc_to_time,
    -249            "ELEMENT_AT": lambda args: exp.Bracket(
    -250                this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True
    -251            ),
    -252            "FROM_HEX": exp.Unhex.from_arg_list,
    -253            "FROM_UNIXTIME": _from_unixtime,
    -254            "FROM_UTF8": lambda args: exp.Decode(
    -255                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    -256            ),
    -257            "NOW": exp.CurrentTimestamp.from_arg_list,
    -258            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    -259                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
    -260            ),
    -261            "REGEXP_REPLACE": lambda args: exp.RegexpReplace(
    -262                this=seq_get(args, 0),
    -263                expression=seq_get(args, 1),
    -264                replacement=seq_get(args, 2) or exp.Literal.string(""),
    -265            ),
    -266            "ROW": exp.Struct.from_arg_list,
    -267            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    -268            "SET_AGG": exp.ArrayUniqueAgg.from_arg_list,
    -269            "SPLIT_TO_MAP": exp.StrToMap.from_arg_list,
    -270            "STRPOS": lambda args: exp.StrPosition(
    -271                this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
    -272            ),
    -273            "TO_CHAR": _parse_to_char,
    -274            "TO_HEX": exp.Hex.from_arg_list,
    -275            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    -276            "TO_UTF8": lambda args: exp.Encode(
    -277                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    -278            ),
    -279        }
    -280
    -281        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    -282        FUNCTION_PARSERS.pop("TRIM")
    +229        VALUES_FOLLOWED_BY_PAREN = False
    +230
    +231        FUNCTIONS = {
    +232            **parser.Parser.FUNCTIONS,
    +233            "ARBITRARY": exp.AnyValue.from_arg_list,
    +234            "APPROX_DISTINCT": exp.ApproxDistinct.from_arg_list,
    +235            "APPROX_PERCENTILE": _approx_percentile,
    +236            "BITWISE_AND": binary_from_function(exp.BitwiseAnd),
    +237            "BITWISE_NOT": lambda args: exp.BitwiseNot(this=seq_get(args, 0)),
    +238            "BITWISE_OR": binary_from_function(exp.BitwiseOr),
    +239            "BITWISE_XOR": binary_from_function(exp.BitwiseXor),
    +240            "CARDINALITY": exp.ArraySize.from_arg_list,
    +241            "CONTAINS": exp.ArrayContains.from_arg_list,
    +242            "DATE_ADD": lambda args: exp.DateAdd(
    +243                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +244            ),
    +245            "DATE_DIFF": lambda args: exp.DateDiff(
    +246                this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0)
    +247            ),
    +248            "DATE_FORMAT": format_time_lambda(exp.TimeToStr, "presto"),
    +249            "DATE_PARSE": format_time_lambda(exp.StrToTime, "presto"),
    +250            "DATE_TRUNC": date_trunc_to_time,
    +251            "ELEMENT_AT": lambda args: exp.Bracket(
    +252                this=seq_get(args, 0), expressions=[seq_get(args, 1)], offset=1, safe=True
    +253            ),
    +254            "FROM_HEX": exp.Unhex.from_arg_list,
    +255            "FROM_UNIXTIME": _from_unixtime,
    +256            "FROM_UTF8": lambda args: exp.Decode(
    +257                this=seq_get(args, 0), replace=seq_get(args, 1), charset=exp.Literal.string("utf-8")
    +258            ),
    +259            "NOW": exp.CurrentTimestamp.from_arg_list,
    +260            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
    +261                this=seq_get(args, 0), expression=seq_get(args, 1), group=seq_get(args, 2)
    +262            ),
    +263            "REGEXP_REPLACE": lambda args: exp.RegexpReplace(
    +264                this=seq_get(args, 0),
    +265                expression=seq_get(args, 1),
    +266                replacement=seq_get(args, 2) or exp.Literal.string(""),
    +267            ),
    +268            "ROW": exp.Struct.from_arg_list,
    +269            "SEQUENCE": exp.GenerateSeries.from_arg_list,
    +270            "SET_AGG": exp.ArrayUniqueAgg.from_arg_list,
    +271            "SPLIT_TO_MAP": exp.StrToMap.from_arg_list,
    +272            "STRPOS": lambda args: exp.StrPosition(
    +273                this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
    +274            ),
    +275            "TO_CHAR": _parse_to_char,
    +276            "TO_HEX": exp.Hex.from_arg_list,
    +277            "TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
    +278            "TO_UTF8": lambda args: exp.Encode(
    +279                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
    +280            ),
    +281        }
    +282
    +283        FUNCTION_PARSERS = parser.Parser.FUNCTION_PARSERS.copy()
    +284        FUNCTION_PARSERS.pop("TRIM")
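The Parser changes above map Presto function names onto generic expression nodes (ELEMENT_AT becomes a one-based, "safe" exp.Bracket, and DATE_ADD's unit-first argument order is normalized into exp.DateAdd); the new VALUES_FOLLOWED_BY_PAREN = False flag appears to let VALUES rows be parsed without surrounding parentheses, as Presto allows. A short sketch of what the parser produces (sample queries are assumptions, not from the patch):

    import sqlglot
    from sqlglot import exp

    # ELEMENT_AT(arr, 1) parses into a Bracket node flagged as safe with a 1-based offset.
    bracket = sqlglot.parse_one("SELECT ELEMENT_AT(arr, 1)", read="presto").find(exp.Bracket)
    print(bracket.args.get("safe"), bracket.args.get("offset"))

    # DATE_ADD('day', 1, ts) is normalized into a generic DateAdd with unit/amount/target.
    date_add = sqlglot.parse_one("SELECT DATE_ADD('day', 1, ts)", read="presto").find(exp.DateAdd)
    print(date_add.text("unit"), date_add.expression.sql(), date_add.this.sql())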
     
    @@ -1698,6 +1707,18 @@ Default: 3
    +
    +
    + VALUES_FOLLOWED_BY_PAREN = +False + + +
    + + + + +
FUNCTIONS =
    @@ -1728,7 +1749,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -1877,238 +1898,238 @@ Default: 3
    -
    284    class Generator(generator.Generator):
    -285        INTERVAL_ALLOWS_PLURAL_FORM = False
    -286        JOIN_HINTS = False
    -287        TABLE_HINTS = False
    -288        QUERY_HINTS = False
    -289        IS_BOOL_ALLOWED = False
    -290        TZ_TO_WITH_TIME_ZONE = True
    -291        NVL2_SUPPORTED = False
    -292        STRUCT_DELIMITER = ("(", ")")
    -293        LIMIT_ONLY_LITERALS = True
    -294        SUPPORTS_SINGLE_ARG_CONCAT = False
    -295        LIKE_PROPERTY_INSIDE_SCHEMA = True
    -296        MULTI_ARG_DISTINCT = False
    -297
    -298        PROPERTIES_LOCATION = {
    -299            **generator.Generator.PROPERTIES_LOCATION,
    -300            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    -301            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    -302        }
    -303
    -304        TYPE_MAPPING = {
    -305            **generator.Generator.TYPE_MAPPING,
    -306            exp.DataType.Type.INT: "INTEGER",
    -307            exp.DataType.Type.FLOAT: "REAL",
    -308            exp.DataType.Type.BINARY: "VARBINARY",
    -309            exp.DataType.Type.TEXT: "VARCHAR",
    -310            exp.DataType.Type.TIMETZ: "TIME",
    -311            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -312            exp.DataType.Type.STRUCT: "ROW",
    -313            exp.DataType.Type.DATETIME: "TIMESTAMP",
    -314            exp.DataType.Type.DATETIME64: "TIMESTAMP",
    -315        }
    -316
    -317        TRANSFORMS = {
    -318            **generator.Generator.TRANSFORMS,
    -319            exp.AnyValue: rename_func("ARBITRARY"),
    -320            exp.ApproxDistinct: _approx_distinct_sql,
    -321            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    -322            exp.ArgMax: rename_func("MAX_BY"),
    -323            exp.ArgMin: rename_func("MIN_BY"),
    -324            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    -325            exp.ArrayConcat: rename_func("CONCAT"),
    -326            exp.ArrayContains: rename_func("CONTAINS"),
    -327            exp.ArraySize: rename_func("CARDINALITY"),
    -328            exp.ArrayUniqueAgg: rename_func("SET_AGG"),
    -329            exp.AtTimeZone: rename_func("AT_TIMEZONE"),
    -330            exp.BitwiseAnd: lambda self,
    -331            e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -332            exp.BitwiseLeftShift: lambda self,
    -333            e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -334            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    -335            exp.BitwiseOr: lambda self,
    -336            e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -337            exp.BitwiseRightShift: lambda self,
    -338            e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -339            exp.BitwiseXor: lambda self,
    -340            e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    -341            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -342            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    -343            exp.DateAdd: lambda self, e: self.func(
    -344                "DATE_ADD",
    -345                exp.Literal.string(e.text("unit") or "DAY"),
    -346                _to_int(
    -347                    e.expression,
    -348                ),
    -349                e.this,
    -350            ),
    -351            exp.DateDiff: lambda self, e: self.func(
    -352                "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this
    -353            ),
    -354            exp.DateStrToDate: datestrtodate_sql,
    -355            exp.DateToDi: lambda self,
    -356            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)",
    -357            exp.DateSub: lambda self, e: self.func(
    -358                "DATE_ADD",
    -359                exp.Literal.string(e.text("unit") or "DAY"),
    -360                _to_int(e.expression * -1),
    -361                e.this,
    -362            ),
    -363            exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"),
    -364            exp.DiToDate: lambda self,
    -365            e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)",
    -366            exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"),
    -367            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    -368            exp.First: _first_last_sql,
    -369            exp.FirstValue: _first_last_sql,
    -370            exp.FromTimeZone: lambda self,
    -371            e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'",
    -372            exp.Group: transforms.preprocess([transforms.unalias_group]),
    -373            exp.GroupConcat: lambda self, e: self.func(
    -374                "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator")
    -375            ),
    -376            exp.Hex: rename_func("TO_HEX"),
    -377            exp.If: if_sql(),
    -378            exp.ILike: no_ilike_sql,
    -379            exp.Initcap: _initcap_sql,
    -380            exp.ParseJSON: rename_func("JSON_PARSE"),
    -381            exp.Last: _first_last_sql,
    -382            exp.LastValue: _first_last_sql,
    -383            exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this),
    -384            exp.Lateral: _explode_to_unnest_sql,
    -385            exp.Left: left_to_substring_sql,
    -386            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    -387            exp.LogicalAnd: rename_func("BOOL_AND"),
    -388            exp.LogicalOr: rename_func("BOOL_OR"),
    -389            exp.Pivot: no_pivot_sql,
    -390            exp.Quantile: _quantile_sql,
    -391            exp.RegexpExtract: regexp_extract_sql,
    -392            exp.Right: right_to_substring_sql,
    -393            exp.SafeDivide: no_safe_divide_sql,
    -394            exp.Schema: _schema_sql,
    -395            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    -396            exp.Select: transforms.preprocess(
    -397                [
    -398                    transforms.eliminate_qualify,
    -399                    transforms.eliminate_distinct_on,
    -400                    transforms.explode_to_unnest(1),
    -401                    transforms.eliminate_semi_and_anti_joins,
    -402                ]
    -403            ),
    -404            exp.SortArray: _no_sort_array,
    -405            exp.StrPosition: rename_func("STRPOS"),
    -406            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    -407            exp.StrToMap: rename_func("SPLIT_TO_MAP"),
    -408            exp.StrToTime: _str_to_time_sql,
    -409            exp.StrToUnix: lambda self,
    -410            e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    -411            exp.StructExtract: struct_extract_sql,
    -412            exp.Table: transforms.preprocess([_unnest_sequence]),
    -413            exp.Timestamp: no_timestamp_sql,
    -414            exp.TimestampTrunc: timestamptrunc_sql,
    -415            exp.TimeStrToDate: timestrtotime_sql,
    -416            exp.TimeStrToTime: timestrtotime_sql,
    -417            exp.TimeStrToUnix: lambda self,
    -418            e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))",
    -419            exp.TimeToStr: lambda self,
    -420            e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    -421            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    -422            exp.ToChar: lambda self,
    -423            e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    -424            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    -425            exp.TsOrDiToDi: lambda self,
    -426            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    -427            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    -428            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
    -429            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    -430            exp.Unhex: rename_func("FROM_HEX"),
    -431            exp.UnixToStr: lambda self,
    -432            e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    -433            exp.UnixToTime: _unix_to_time_sql,
    -434            exp.UnixToTimeStr: lambda self,
    -435            e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    -436            exp.VariancePop: rename_func("VAR_POP"),
    -437            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    -438            exp.WithinGroup: transforms.preprocess(
    -439                [transforms.remove_within_group_for_percentiles]
    -440            ),
    -441            exp.Xor: bool_xor_sql,
    -442        }
    -443
    -444        def bracket_sql(self, expression: exp.Bracket) -> str:
    -445            if expression.args.get("safe"):
    -446                return self.func(
    -447                    "ELEMENT_AT",
    -448                    expression.this,
    -449                    seq_get(
    -450                        apply_index_offset(
    -451                            expression.this,
    -452                            expression.expressions,
    -453                            1 - expression.args.get("offset", 0),
    -454                        ),
    -455                        0,
    -456                    ),
    -457                )
    -458            return super().bracket_sql(expression)
    -459
    -460        def struct_sql(self, expression: exp.Struct) -> str:
    -461            if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions):
    -462                self.unsupported("Struct with key-value definitions is unsupported.")
    -463                return self.function_fallback_sql(expression)
    -464
    -465            return rename_func("ROW")(self, expression)
    +            
    286    class Generator(generator.Generator):
    +287        INTERVAL_ALLOWS_PLURAL_FORM = False
    +288        JOIN_HINTS = False
    +289        TABLE_HINTS = False
    +290        QUERY_HINTS = False
    +291        IS_BOOL_ALLOWED = False
    +292        TZ_TO_WITH_TIME_ZONE = True
    +293        NVL2_SUPPORTED = False
    +294        STRUCT_DELIMITER = ("(", ")")
    +295        LIMIT_ONLY_LITERALS = True
    +296        SUPPORTS_SINGLE_ARG_CONCAT = False
    +297        LIKE_PROPERTY_INSIDE_SCHEMA = True
    +298        MULTI_ARG_DISTINCT = False
    +299
    +300        PROPERTIES_LOCATION = {
    +301            **generator.Generator.PROPERTIES_LOCATION,
    +302            exp.LocationProperty: exp.Properties.Location.UNSUPPORTED,
    +303            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    +304        }
    +305
    +306        TYPE_MAPPING = {
    +307            **generator.Generator.TYPE_MAPPING,
    +308            exp.DataType.Type.INT: "INTEGER",
    +309            exp.DataType.Type.FLOAT: "REAL",
    +310            exp.DataType.Type.BINARY: "VARBINARY",
    +311            exp.DataType.Type.TEXT: "VARCHAR",
    +312            exp.DataType.Type.TIMETZ: "TIME",
    +313            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +314            exp.DataType.Type.STRUCT: "ROW",
    +315            exp.DataType.Type.DATETIME: "TIMESTAMP",
    +316            exp.DataType.Type.DATETIME64: "TIMESTAMP",
    +317        }
    +318
    +319        TRANSFORMS = {
    +320            **generator.Generator.TRANSFORMS,
    +321            exp.AnyValue: rename_func("ARBITRARY"),
    +322            exp.ApproxDistinct: _approx_distinct_sql,
    +323            exp.ApproxQuantile: rename_func("APPROX_PERCENTILE"),
    +324            exp.ArgMax: rename_func("MAX_BY"),
    +325            exp.ArgMin: rename_func("MIN_BY"),
    +326            exp.Array: lambda self, e: f"ARRAY[{self.expressions(e, flat=True)}]",
    +327            exp.ArrayConcat: rename_func("CONCAT"),
    +328            exp.ArrayContains: rename_func("CONTAINS"),
    +329            exp.ArraySize: rename_func("CARDINALITY"),
    +330            exp.ArrayUniqueAgg: rename_func("SET_AGG"),
    +331            exp.AtTimeZone: rename_func("AT_TIMEZONE"),
    +332            exp.BitwiseAnd: lambda self,
    +333            e: f"BITWISE_AND({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +334            exp.BitwiseLeftShift: lambda self,
    +335            e: f"BITWISE_ARITHMETIC_SHIFT_LEFT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +336            exp.BitwiseNot: lambda self, e: f"BITWISE_NOT({self.sql(e, 'this')})",
    +337            exp.BitwiseOr: lambda self,
    +338            e: f"BITWISE_OR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +339            exp.BitwiseRightShift: lambda self,
    +340            e: f"BITWISE_ARITHMETIC_SHIFT_RIGHT({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +341            exp.BitwiseXor: lambda self,
    +342            e: f"BITWISE_XOR({self.sql(e, 'this')}, {self.sql(e, 'expression')})",
    +343            exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +344            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    +345            exp.DateAdd: lambda self, e: self.func(
    +346                "DATE_ADD",
    +347                exp.Literal.string(e.text("unit") or "DAY"),
    +348                _to_int(
    +349                    e.expression,
    +350                ),
    +351                e.this,
    +352            ),
    +353            exp.DateDiff: lambda self, e: self.func(
    +354                "DATE_DIFF", exp.Literal.string(e.text("unit") or "DAY"), e.expression, e.this
    +355            ),
    +356            exp.DateStrToDate: datestrtodate_sql,
    +357            exp.DateToDi: lambda self,
    +358            e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)",
    +359            exp.DateSub: lambda self, e: self.func(
    +360                "DATE_ADD",
    +361                exp.Literal.string(e.text("unit") or "DAY"),
    +362                _to_int(e.expression * -1),
    +363                e.this,
    +364            ),
    +365            exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"),
    +366            exp.DiToDate: lambda self,
    +367            e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)",
    +368            exp.Encode: lambda self, e: encode_decode_sql(self, e, "TO_UTF8"),
    +369            exp.FileFormatProperty: lambda self, e: f"FORMAT='{e.name.upper()}'",
    +370            exp.First: _first_last_sql,
    +371            exp.FirstValue: _first_last_sql,
    +372            exp.FromTimeZone: lambda self,
    +373            e: f"WITH_TIMEZONE({self.sql(e, 'this')}, {self.sql(e, 'zone')}) AT TIME ZONE 'UTC'",
    +374            exp.Group: transforms.preprocess([transforms.unalias_group]),
    +375            exp.GroupConcat: lambda self, e: self.func(
    +376                "ARRAY_JOIN", self.func("ARRAY_AGG", e.this), e.args.get("separator")
    +377            ),
    +378            exp.Hex: rename_func("TO_HEX"),
    +379            exp.If: if_sql(),
    +380            exp.ILike: no_ilike_sql,
    +381            exp.Initcap: _initcap_sql,
    +382            exp.ParseJSON: rename_func("JSON_PARSE"),
    +383            exp.Last: _first_last_sql,
    +384            exp.LastValue: _first_last_sql,
    +385            exp.LastDay: lambda self, e: self.func("LAST_DAY_OF_MONTH", e.this),
    +386            exp.Lateral: _explode_to_unnest_sql,
    +387            exp.Left: left_to_substring_sql,
    +388            exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
    +389            exp.LogicalAnd: rename_func("BOOL_AND"),
    +390            exp.LogicalOr: rename_func("BOOL_OR"),
    +391            exp.Pivot: no_pivot_sql,
    +392            exp.Quantile: _quantile_sql,
    +393            exp.RegexpExtract: regexp_extract_sql,
    +394            exp.Right: right_to_substring_sql,
    +395            exp.SafeDivide: no_safe_divide_sql,
    +396            exp.Schema: _schema_sql,
    +397            exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
    +398            exp.Select: transforms.preprocess(
    +399                [
    +400                    transforms.eliminate_qualify,
    +401                    transforms.eliminate_distinct_on,
    +402                    transforms.explode_to_unnest(1),
    +403                    transforms.eliminate_semi_and_anti_joins,
    +404                ]
    +405            ),
    +406            exp.SortArray: _no_sort_array,
    +407            exp.StrPosition: rename_func("STRPOS"),
    +408            exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
    +409            exp.StrToMap: rename_func("SPLIT_TO_MAP"),
    +410            exp.StrToTime: _str_to_time_sql,
    +411            exp.StrToUnix: lambda self,
    +412            e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {self.format_time(e)}))",
    +413            exp.StructExtract: struct_extract_sql,
    +414            exp.Table: transforms.preprocess([_unnest_sequence]),
    +415            exp.Timestamp: no_timestamp_sql,
    +416            exp.TimestampTrunc: timestamptrunc_sql,
    +417            exp.TimeStrToDate: timestrtotime_sql,
    +418            exp.TimeStrToTime: timestrtotime_sql,
    +419            exp.TimeStrToUnix: lambda self,
    +420            e: f"TO_UNIXTIME(DATE_PARSE({self.sql(e, 'this')}, {Presto.TIME_FORMAT}))",
    +421            exp.TimeToStr: lambda self,
    +422            e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    +423            exp.TimeToUnix: rename_func("TO_UNIXTIME"),
    +424            exp.ToChar: lambda self,
    +425            e: f"DATE_FORMAT({self.sql(e, 'this')}, {self.format_time(e)})",
    +426            exp.TryCast: transforms.preprocess([transforms.epoch_cast_to_ts]),
    +427            exp.TsOrDiToDi: lambda self,
    +428            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS VARCHAR), '-', ''), 1, 8) AS INT)",
    +429            exp.TsOrDsAdd: _ts_or_ds_add_sql,
    +430            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
    +431            exp.TsOrDsToDate: _ts_or_ds_to_date_sql,
    +432            exp.Unhex: rename_func("FROM_HEX"),
    +433            exp.UnixToStr: lambda self,
    +434            e: f"DATE_FORMAT(FROM_UNIXTIME({self.sql(e, 'this')}), {self.format_time(e)})",
    +435            exp.UnixToTime: _unix_to_time_sql,
    +436            exp.UnixToTimeStr: lambda self,
    +437            e: f"CAST(FROM_UNIXTIME({self.sql(e, 'this')}) AS VARCHAR)",
    +438            exp.VariancePop: rename_func("VAR_POP"),
    +439            exp.With: transforms.preprocess([transforms.add_recursive_cte_column_names]),
    +440            exp.WithinGroup: transforms.preprocess(
    +441                [transforms.remove_within_group_for_percentiles]
    +442            ),
    +443            exp.Xor: bool_xor_sql,
    +444        }
    +445
    +446        def bracket_sql(self, expression: exp.Bracket) -> str:
    +447            if expression.args.get("safe"):
    +448                return self.func(
    +449                    "ELEMENT_AT",
    +450                    expression.this,
    +451                    seq_get(
    +452                        apply_index_offset(
    +453                            expression.this,
    +454                            expression.expressions,
    +455                            1 - expression.args.get("offset", 0),
    +456                        ),
    +457                        0,
    +458                    ),
    +459                )
    +460            return super().bracket_sql(expression)
    +461
    +462        def struct_sql(self, expression: exp.Struct) -> str:
    +463            if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions):
    +464                self.unsupported("Struct with key-value definitions is unsupported.")
    +465                return self.function_fallback_sql(expression)
     466
    -467        def interval_sql(self, expression: exp.Interval) -> str:
    -468            unit = self.sql(expression, "unit")
    -469            if expression.this and unit.startswith("WEEK"):
    -470                return f"({expression.this.name} * INTERVAL '7' DAY)"
    -471            return super().interval_sql(expression)
    -472
    -473        def transaction_sql(self, expression: exp.Transaction) -> str:
    -474            modes = expression.args.get("modes")
    -475            modes = f" {', '.join(modes)}" if modes else ""
    -476            return f"START TRANSACTION{modes}"
    -477
    -478        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    -479            start = expression.args["start"]
    -480            end = expression.args["end"]
    -481            step = expression.args.get("step")
    -482
    -483            if isinstance(start, exp.Cast):
    -484                target_type = start.to
    -485            elif isinstance(end, exp.Cast):
    -486                target_type = end.to
    -487            else:
    -488                target_type = None
    -489
    -490            if target_type and target_type.is_type("timestamp"):
    -491                if target_type is start.to:
    -492                    end = exp.cast(end, target_type)
    -493                else:
    -494                    start = exp.cast(start, target_type)
    -495
    -496            return self.func("SEQUENCE", start, end, step)
    +467            return rename_func("ROW")(self, expression)
    +468
    +469        def interval_sql(self, expression: exp.Interval) -> str:
    +470            unit = self.sql(expression, "unit")
    +471            if expression.this and unit.startswith("WEEK"):
    +472                return f"({expression.this.name} * INTERVAL '7' DAY)"
    +473            return super().interval_sql(expression)
    +474
    +475        def transaction_sql(self, expression: exp.Transaction) -> str:
    +476            modes = expression.args.get("modes")
    +477            modes = f" {', '.join(modes)}" if modes else ""
    +478            return f"START TRANSACTION{modes}"
    +479
    +480        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    +481            start = expression.args["start"]
    +482            end = expression.args["end"]
    +483            step = expression.args.get("step")
    +484
    +485            if isinstance(start, exp.Cast):
    +486                target_type = start.to
    +487            elif isinstance(end, exp.Cast):
    +488                target_type = end.to
    +489            else:
    +490                target_type = None
    +491
    +492            if target_type and target_type.is_type("timestamp"):
    +493                if target_type is start.to:
    +494                    end = exp.cast(end, target_type)
    +495                else:
    +496                    start = exp.cast(start, target_type)
     497
    -498        def offset_limit_modifiers(
    -499            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    -500        ) -> t.List[str]:
    -501            return [
    -502                self.sql(expression, "offset"),
    -503                self.sql(limit),
    -504            ]
    -505
    -506        def create_sql(self, expression: exp.Create) -> str:
    -507            """
    -508            Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression),
    -509            so we need to remove them
    -510            """
    -511            kind = expression.args["kind"]
    -512            schema = expression.this
    -513            if kind == "VIEW" and schema.expressions:
    -514                expression.this.set("expressions", None)
    -515            return super().create_sql(expression)
    +498            return self.func("SEQUENCE", start, end, step)
    +499
    +500        def offset_limit_modifiers(
    +501            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +502        ) -> t.List[str]:
    +503            return [
    +504                self.sql(expression, "offset"),
    +505                self.sql(limit),
    +506            ]
    +507
    +508        def create_sql(self, expression: exp.Create) -> str:
    +509            """
    +510            Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression),
    +511            so we need to remove them
    +512            """
    +513            kind = expression.args["kind"]
    +514            schema = expression.this
    +515            if kind == "VIEW" and schema.expressions:
    +516                expression.this.set("expressions", None)
    +517            return super().create_sql(expression)
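The Generator settings and TYPE_MAPPING in the hunk above control how generic types and clauses are spelled in Presto. A small illustrative sketch (the DDL and the source dialect are assumptions, not part of the patch):

    import sqlglot

    # TEXT and DATETIME have no direct Presto spellings; TYPE_MAPPING renders them
    # as VARCHAR and TIMESTAMP respectively.
    print(sqlglot.transpile("CREATE TABLE t (a TEXT, b DATETIME)", read="mysql", write="presto")[0])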
     
    @@ -2345,21 +2366,21 @@ Default: True
    -
    444        def bracket_sql(self, expression: exp.Bracket) -> str:
    -445            if expression.args.get("safe"):
    -446                return self.func(
    -447                    "ELEMENT_AT",
    -448                    expression.this,
    -449                    seq_get(
    -450                        apply_index_offset(
    -451                            expression.this,
    -452                            expression.expressions,
    -453                            1 - expression.args.get("offset", 0),
    -454                        ),
    -455                        0,
    -456                    ),
    -457                )
    -458            return super().bracket_sql(expression)
    +            
    446        def bracket_sql(self, expression: exp.Bracket) -> str:
    +447            if expression.args.get("safe"):
    +448                return self.func(
    +449                    "ELEMENT_AT",
    +450                    expression.this,
    +451                    seq_get(
    +452                        apply_index_offset(
    +453                            expression.this,
    +454                            expression.expressions,
    +455                            1 - expression.args.get("offset", 0),
    +456                        ),
    +457                        0,
    +458                    ),
    +459                )
    +460            return super().bracket_sql(expression)
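bracket_sql above keeps ELEMENT_AT-style ("safe") subscripts as ELEMENT_AT calls and leaves ordinary brackets alone. An illustrative round trip (the queries are assumptions):

    import sqlglot

    # The first query came from ELEMENT_AT, so it is rendered back through ELEMENT_AT;
    # the second stays in bracket form.
    print(sqlglot.transpile("SELECT ELEMENT_AT(arr, 1)", read="presto", write="presto")[0])
    print(sqlglot.transpile("SELECT arr[1]", read="presto", write="presto")[0])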
     
    @@ -2377,12 +2398,12 @@ Default: True
    -
    460        def struct_sql(self, expression: exp.Struct) -> str:
    -461            if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions):
    -462                self.unsupported("Struct with key-value definitions is unsupported.")
    -463                return self.function_fallback_sql(expression)
    -464
    -465            return rename_func("ROW")(self, expression)
    +            
    462        def struct_sql(self, expression: exp.Struct) -> str:
    +463            if any(isinstance(arg, self.KEY_VALUE_DEFINITIONS) for arg in expression.expressions):
    +464                self.unsupported("Struct with key-value definitions is unsupported.")
    +465                return self.function_fallback_sql(expression)
    +466
    +467            return rename_func("ROW")(self, expression)
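struct_sql above emits positional structs with Presto's ROW constructor and flags key-value style structs as unsupported. A hedged example (the BigQuery source query is an assumption):

    import sqlglot

    # A positional STRUCT literal becomes ROW(...); named fields would instead trigger
    # the "key-value definitions" warning and fall back to a generic function call.
    print(sqlglot.transpile("SELECT STRUCT(1, 2)", read="bigquery", write="presto")[0])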
     
    @@ -2400,11 +2421,11 @@ Default: True
    -
    467        def interval_sql(self, expression: exp.Interval) -> str:
    -468            unit = self.sql(expression, "unit")
    -469            if expression.this and unit.startswith("WEEK"):
    -470                return f"({expression.this.name} * INTERVAL '7' DAY)"
    -471            return super().interval_sql(expression)
    +            
    469        def interval_sql(self, expression: exp.Interval) -> str:
    +470            unit = self.sql(expression, "unit")
    +471            if expression.this and unit.startswith("WEEK"):
    +472                return f"({expression.this.name} * INTERVAL '7' DAY)"
    +473            return super().interval_sql(expression)
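interval_sql above works around Presto's lack of a WEEK interval unit by multiplying a one-day interval. An illustrative sketch:

    import sqlglot

    # INTERVAL '2' WEEK should come out as (2 * INTERVAL '7' DAY) per interval_sql.
    print(sqlglot.transpile("SELECT CURRENT_DATE + INTERVAL '2' WEEK", write="presto")[0])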
     
    @@ -2422,10 +2443,10 @@ Default: True
    -
    473        def transaction_sql(self, expression: exp.Transaction) -> str:
    -474            modes = expression.args.get("modes")
    -475            modes = f" {', '.join(modes)}" if modes else ""
    -476            return f"START TRANSACTION{modes}"
    +            
    475        def transaction_sql(self, expression: exp.Transaction) -> str:
    +476            modes = expression.args.get("modes")
    +477            modes = f" {', '.join(modes)}" if modes else ""
    +478            return f"START TRANSACTION{modes}"
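transaction_sql above renders exp.Transaction as START TRANSACTION and appends any isolation modes. A sketch (the source dialect is an assumption):

    import sqlglot

    # BEGIN parses into exp.Transaction and is re-rendered with Presto's keyword.
    print(sqlglot.transpile("BEGIN", read="mysql", write="presto")[0])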
     
    @@ -2443,25 +2464,25 @@ Default: True
    -
    478        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    -479            start = expression.args["start"]
    -480            end = expression.args["end"]
    -481            step = expression.args.get("step")
    -482
    -483            if isinstance(start, exp.Cast):
    -484                target_type = start.to
    -485            elif isinstance(end, exp.Cast):
    -486                target_type = end.to
    -487            else:
    -488                target_type = None
    -489
    -490            if target_type and target_type.is_type("timestamp"):
    -491                if target_type is start.to:
    -492                    end = exp.cast(end, target_type)
    -493                else:
    -494                    start = exp.cast(start, target_type)
    -495
    -496            return self.func("SEQUENCE", start, end, step)
    +            
    480        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
    +481            start = expression.args["start"]
    +482            end = expression.args["end"]
    +483            step = expression.args.get("step")
    +484
    +485            if isinstance(start, exp.Cast):
    +486                target_type = start.to
    +487            elif isinstance(end, exp.Cast):
    +488                target_type = end.to
    +489            else:
    +490                target_type = None
    +491
    +492            if target_type and target_type.is_type("timestamp"):
    +493                if target_type is start.to:
    +494                    end = exp.cast(end, target_type)
    +495                else:
    +496                    start = exp.cast(start, target_type)
    +497
    +498            return self.func("SEQUENCE", start, end, step)
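generateseries_sql above maps exp.GenerateSeries onto Presto's SEQUENCE and, because SEQUENCE requires both endpoints to share a type, casts the other endpoint whenever one side is already a TIMESTAMP cast. Illustrative sketch (column names are assumptions):

    import sqlglot

    # With only the start cast to TIMESTAMP, the generator should cast end_ts as well.
    print(sqlglot.transpile("SELECT GENERATE_SERIES(CAST('2020-01-01' AS TIMESTAMP), end_ts)", write="presto")[0])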
     
    @@ -2479,13 +2500,13 @@ Default: True
    -
    498        def offset_limit_modifiers(
    -499            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    -500        ) -> t.List[str]:
    -501            return [
    -502                self.sql(expression, "offset"),
    -503                self.sql(limit),
    -504            ]
    +            
    500        def offset_limit_modifiers(
    +501            self, expression: exp.Expression, fetch: bool, limit: t.Optional[exp.Fetch | exp.Limit]
    +502        ) -> t.List[str]:
    +503            return [
    +504                self.sql(expression, "offset"),
    +505                self.sql(limit),
    +506            ]
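offset_limit_modifiers above emits OFFSET before LIMIT, matching Presto/Trino's clause order. Sketch (the query is illustrative):

    import sqlglot

    # Postgres-style LIMIT ... OFFSET ... is reordered for Presto.
    print(sqlglot.transpile("SELECT x FROM t LIMIT 10 OFFSET 5", read="postgres", write="presto")[0])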
     
    @@ -2503,16 +2524,16 @@ Default: True
    -
    506        def create_sql(self, expression: exp.Create) -> str:
    -507            """
    -508            Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression),
    -509            so we need to remove them
    -510            """
    -511            kind = expression.args["kind"]
    -512            schema = expression.this
    -513            if kind == "VIEW" and schema.expressions:
    -514                expression.this.set("expressions", None)
    -515            return super().create_sql(expression)
    +            
    508        def create_sql(self, expression: exp.Create) -> str:
    +509            """
    +510            Presto doesn't support CREATE VIEW with expressions (ex: `CREATE VIEW x (cola)` then `(cola)` is the expression),
    +511            so we need to remove them
    +512            """
    +513            kind = expression.args["kind"]
    +514            schema = expression.this
    +515            if kind == "VIEW" and schema.expressions:
    +516                expression.this.set("expressions", None)
    +517            return super().create_sql(expression)
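create_sql above strips the column list from CREATE VIEW, which Presto does not accept. Sketch (the view definition is an assumption):

    import sqlglot

    # The (c1, c2) column list is dropped; the SELECT body is kept as-is.
    print(sqlglot.transpile("CREATE VIEW v (c1, c2) AS SELECT a, b FROM t", read="postgres", write="presto")[0])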
     
diff --git a/docs/sqlglot/dialects/redshift.html b/docs/sqlglot/dialects/redshift.html
index e291fcd..cd80428 100644
--- a/docs/sqlglot/dialects/redshift.html
+++ b/docs/sqlglot/dialects/redshift.html
@@ -378,96 +378,97 @@
158            "UNLOAD": TokenType.COMMAND,
159            "VARBYTE": TokenType.VARBINARY,
160        }
-161
-162        # Redshift allows # to appear as a table identifier prefix
-163        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
-164        SINGLE_TOKENS.pop("#")
-165
-166    class Generator(Postgres.Generator):
-167        LOCKING_READS_SUPPORTED = False
-168        QUERY_HINTS = False
-169        VALUES_AS_TABLE = False
-170        TZ_TO_WITH_TIME_ZONE = True
-171        NVL2_SUPPORTED = True
-172        LAST_DAY_SUPPORTS_DATE_PART = False
-173
-174        TYPE_MAPPING = {
-175            **Postgres.Generator.TYPE_MAPPING,
-176            exp.DataType.Type.BINARY: "VARBYTE",
-177            exp.DataType.Type.INT: "INTEGER",
-178            exp.DataType.Type.TIMETZ: "TIME",
-179            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
-180            exp.DataType.Type.VARBINARY: "VARBYTE",
-181        }
-182
-183        TRANSFORMS = {
-184            **Postgres.Generator.TRANSFORMS,
-185            exp.Concat: concat_to_dpipe_sql,
-186            exp.ConcatWs: concat_ws_to_dpipe_sql,
-187            exp.ApproxDistinct: lambda self,
-188            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
-189            exp.CurrentTimestamp: lambda self, e: (
-190                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
-191            ),
-192            exp.DateAdd: date_delta_sql("DATEADD"),
-193            exp.DateDiff: date_delta_sql("DATEDIFF"),
-194            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
-195            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
-196            exp.FromBase: rename_func("STRTOL"),
-197            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
-198            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
-199            exp.GroupConcat: rename_func("LISTAGG"),
-200            exp.ParseJSON: rename_func("JSON_PARSE"),
-201            exp.Select: transforms.preprocess(
-202                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
-203            ),
-204            exp.SortKeyProperty: lambda self,
-205            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
-206            exp.TableSample: no_tablesample_sql,
-207            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
-208            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
-209        }
-210
-211        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
-212        TRANSFORMS.pop(exp.Pivot)
-213
-214        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
-215        TRANSFORMS.pop(exp.Pow)
-216
-217        # Redshift supports ANY_VALUE(..)
-218        TRANSFORMS.pop(exp.AnyValue)
-219
-220        # Redshift supports LAST_DAY(..)
-221        TRANSFORMS.pop(exp.LastDay)
-222
-223        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
-224
-225        def with_properties(self, properties: exp.Properties) -> str:
-226            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
-227            return self.properties(properties, prefix=" ", suffix="")
-228
-229        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
-230            if expression.is_type(exp.DataType.Type.JSON):
-231                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
-232                return self.sql(expression, "this")
-233
-234            return super().cast_sql(expression, safe_prefix=safe_prefix)
-235
-236        def datatype_sql(self, expression: exp.DataType) -> str:
-237            """
-238            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
-239            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
-240            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
-241            `TEXT` to `VARCHAR`.
-242            """
-243            if expression.is_type("text"):
-244                expression.set("this", exp.DataType.Type.VARCHAR)
-245                precision = expression.args.get("expressions")
-246
-247                if not precision:
-248                    expression.append("expressions", exp.var("MAX"))
-249
-250            return super().datatype_sql(expression)
+161        KEYWORDS.pop("VALUES")
+162
+163        # Redshift allows # to appear as a table identifier prefix
+164        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
+165        SINGLE_TOKENS.pop("#")
+166
+167    class Generator(Postgres.Generator):
+168        LOCKING_READS_SUPPORTED = False
+169        QUERY_HINTS = False
+170        VALUES_AS_TABLE = False
+171        TZ_TO_WITH_TIME_ZONE = True
+172        NVL2_SUPPORTED = True
+173        LAST_DAY_SUPPORTS_DATE_PART = False
+174
+175        TYPE_MAPPING = {
+176            **Postgres.Generator.TYPE_MAPPING,
+177            exp.DataType.Type.BINARY: "VARBYTE",
+178            exp.DataType.Type.INT: "INTEGER",
+179            exp.DataType.Type.TIMETZ: "TIME",
+180            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
+181            exp.DataType.Type.VARBINARY: "VARBYTE",
+182        }
+183
+184        TRANSFORMS = {
+185            **Postgres.Generator.TRANSFORMS,
+186            exp.Concat: concat_to_dpipe_sql,
+187            exp.ConcatWs: concat_ws_to_dpipe_sql,
+188            exp.ApproxDistinct: lambda self,
+189            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
+190            exp.CurrentTimestamp: lambda self, e: (
+191                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
+192            ),
+193            exp.DateAdd: date_delta_sql("DATEADD"),
+194            exp.DateDiff: date_delta_sql("DATEDIFF"),
+195            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
+196            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+197            exp.FromBase: rename_func("STRTOL"),
+198            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
+199            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
+200            exp.GroupConcat: rename_func("LISTAGG"),
+201            exp.ParseJSON: rename_func("JSON_PARSE"),
+202            exp.Select: transforms.preprocess(
+203                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
+204            ),
+205            exp.SortKeyProperty: lambda self,
+206            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
+207            exp.TableSample: no_tablesample_sql,
+208            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
+209            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
+210        }
+211
+212        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
+213        TRANSFORMS.pop(exp.Pivot)
+214
+215        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
+216        TRANSFORMS.pop(exp.Pow)
+217
+218        # Redshift supports ANY_VALUE(..)
+219        TRANSFORMS.pop(exp.AnyValue)
+220
+221        # Redshift supports LAST_DAY(..)
+222        TRANSFORMS.pop(exp.LastDay)
+223
+224        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
+225
+226        def with_properties(self, properties: exp.Properties) -> str:
+227            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
+228            return self.properties(properties, prefix=" ", suffix="")
+229
+230        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
+231            if expression.is_type(exp.DataType.Type.JSON):
+232                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
+233                return self.sql(expression, "this")
+234
+235            return super().cast_sql(expression, safe_prefix=safe_prefix)
+236
+237        def datatype_sql(self, expression: exp.DataType) -> str:
+238            """
+239            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
+240            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
+241            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
+242            `TEXT` to `VARCHAR`.
+243            """
+244            if expression.is_type("text"):
+245                expression.set("this", exp.DataType.Type.VARCHAR)
+246                precision = expression.args.get("expressions")
+247
+248                if not precision:
+249                    expression.append("expressions", exp.var("MAX"))
+250
+251            return super().datatype_sql(expression)
    @@ -609,96 +610,97 @@ 159 "UNLOAD": TokenType.COMMAND, 160 "VARBYTE": TokenType.VARBINARY, 161 } -162 -163 # Redshift allows # to appear as a table identifier prefix -164 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() -165 SINGLE_TOKENS.pop("#") -166 -167 class Generator(Postgres.Generator): -168 LOCKING_READS_SUPPORTED = False -169 QUERY_HINTS = False -170 VALUES_AS_TABLE = False -171 TZ_TO_WITH_TIME_ZONE = True -172 NVL2_SUPPORTED = True -173 LAST_DAY_SUPPORTS_DATE_PART = False -174 -175 TYPE_MAPPING = { -176 **Postgres.Generator.TYPE_MAPPING, -177 exp.DataType.Type.BINARY: "VARBYTE", -178 exp.DataType.Type.INT: "INTEGER", -179 exp.DataType.Type.TIMETZ: "TIME", -180 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", -181 exp.DataType.Type.VARBINARY: "VARBYTE", -182 } -183 -184 TRANSFORMS = { -185 **Postgres.Generator.TRANSFORMS, -186 exp.Concat: concat_to_dpipe_sql, -187 exp.ConcatWs: concat_ws_to_dpipe_sql, -188 exp.ApproxDistinct: lambda self, -189 e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})", -190 exp.CurrentTimestamp: lambda self, e: ( -191 "SYSDATE" if e.args.get("transaction") else "GETDATE()" -192 ), -193 exp.DateAdd: date_delta_sql("DATEADD"), -194 exp.DateDiff: date_delta_sql("DATEDIFF"), -195 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})", -196 exp.DistStyleProperty: lambda self, e: self.naked_property(e), -197 exp.FromBase: rename_func("STRTOL"), -198 exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql, -199 exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), -200 exp.GroupConcat: rename_func("LISTAGG"), -201 exp.ParseJSON: rename_func("JSON_PARSE"), -202 exp.Select: transforms.preprocess( -203 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] -204 ), -205 exp.SortKeyProperty: lambda self, -206 e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", -207 exp.TableSample: no_tablesample_sql, -208 exp.TsOrDsAdd: date_delta_sql("DATEADD"), -209 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), -210 } -211 -212 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots -213 TRANSFORMS.pop(exp.Pivot) -214 -215 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres) -216 TRANSFORMS.pop(exp.Pow) -217 -218 # Redshift supports ANY_VALUE(..) -219 TRANSFORMS.pop(exp.AnyValue) -220 -221 # Redshift supports LAST_DAY(..) -222 TRANSFORMS.pop(exp.LastDay) -223 -224 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"} -225 -226 def with_properties(self, properties: exp.Properties) -> str: -227 """Redshift doesn't have `WITH` as part of their with_properties so we remove it""" -228 return self.properties(properties, prefix=" ", suffix="") -229 -230 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: -231 if expression.is_type(exp.DataType.Type.JSON): -232 # Redshift doesn't support a JSON type, so casting to it is treated as a noop -233 return self.sql(expression, "this") -234 -235 return super().cast_sql(expression, safe_prefix=safe_prefix) -236 -237 def datatype_sql(self, expression: exp.DataType) -> str: -238 """ -239 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean -240 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type -241 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert -242 `TEXT` to `VARCHAR`. 
-243 """ -244 if expression.is_type("text"): -245 expression.set("this", exp.DataType.Type.VARCHAR) -246 precision = expression.args.get("expressions") -247 -248 if not precision: -249 expression.append("expressions", exp.var("MAX")) -250 -251 return super().datatype_sql(expression) +162 KEYWORDS.pop("VALUES") +163 +164 # Redshift allows # to appear as a table identifier prefix +165 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() +166 SINGLE_TOKENS.pop("#") +167 +168 class Generator(Postgres.Generator): +169 LOCKING_READS_SUPPORTED = False +170 QUERY_HINTS = False +171 VALUES_AS_TABLE = False +172 TZ_TO_WITH_TIME_ZONE = True +173 NVL2_SUPPORTED = True +174 LAST_DAY_SUPPORTS_DATE_PART = False +175 +176 TYPE_MAPPING = { +177 **Postgres.Generator.TYPE_MAPPING, +178 exp.DataType.Type.BINARY: "VARBYTE", +179 exp.DataType.Type.INT: "INTEGER", +180 exp.DataType.Type.TIMETZ: "TIME", +181 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", +182 exp.DataType.Type.VARBINARY: "VARBYTE", +183 } +184 +185 TRANSFORMS = { +186 **Postgres.Generator.TRANSFORMS, +187 exp.Concat: concat_to_dpipe_sql, +188 exp.ConcatWs: concat_ws_to_dpipe_sql, +189 exp.ApproxDistinct: lambda self, +190 e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})", +191 exp.CurrentTimestamp: lambda self, e: ( +192 "SYSDATE" if e.args.get("transaction") else "GETDATE()" +193 ), +194 exp.DateAdd: date_delta_sql("DATEADD"), +195 exp.DateDiff: date_delta_sql("DATEDIFF"), +196 exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})", +197 exp.DistStyleProperty: lambda self, e: self.naked_property(e), +198 exp.FromBase: rename_func("STRTOL"), +199 exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql, +200 exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"), +201 exp.GroupConcat: rename_func("LISTAGG"), +202 exp.ParseJSON: rename_func("JSON_PARSE"), +203 exp.Select: transforms.preprocess( +204 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] +205 ), +206 exp.SortKeyProperty: lambda self, +207 e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})", +208 exp.TableSample: no_tablesample_sql, +209 exp.TsOrDsAdd: date_delta_sql("DATEADD"), +210 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), +211 } +212 +213 # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots +214 TRANSFORMS.pop(exp.Pivot) +215 +216 # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres) +217 TRANSFORMS.pop(exp.Pow) +218 +219 # Redshift supports ANY_VALUE(..) +220 TRANSFORMS.pop(exp.AnyValue) +221 +222 # Redshift supports LAST_DAY(..) 
+223 TRANSFORMS.pop(exp.LastDay) +224 +225 RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"} +226 +227 def with_properties(self, properties: exp.Properties) -> str: +228 """Redshift doesn't have `WITH` as part of their with_properties so we remove it""" +229 return self.properties(properties, prefix=" ", suffix="") +230 +231 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: +232 if expression.is_type(exp.DataType.Type.JSON): +233 # Redshift doesn't support a JSON type, so casting to it is treated as a noop +234 return self.sql(expression, "this") +235 +236 return super().cast_sql(expression, safe_prefix=safe_prefix) +237 +238 def datatype_sql(self, expression: exp.DataType) -> str: +239 """ +240 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean +241 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type +242 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert +243 `TEXT` to `VARCHAR`. +244 """ +245 if expression.is_type("text"): +246 expression.set("this", exp.DataType.Type.VARCHAR) +247 precision = expression.args.get("expressions") +248 +249 if not precision: +250 expression.append("expressions", exp.var("MAX")) +251 +252 return super().datatype_sql(expression)
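The two tokenizer tweaks above are easiest to see end to end: dropping "#" from SINGLE_TOKENS keeps Redshift's temporary-table prefix attached to the identifier, and KEYWORDS.pop("VALUES") stops VALUES from always being tokenized as a keyword. A minimal sketch, assuming sqlglot's public parse_one API and the "redshift" dialect name; the table name #staging_orders is made up for illustration:

    import sqlglot

    # Because "#" is no longer a single-character token, the leading "#"
    # (Redshift's temporary-table prefix) stays part of the table identifier.
    ast = sqlglot.parse_one("SELECT * FROM #staging_orders", read="redshift")
    print(ast.sql(dialect="redshift"))  # expected to round-trip unchanged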
    @@ -1326,6 +1328,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
+VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -1377,10 +1380,11 @@ Default: 3 159 "UNLOAD": TokenType.COMMAND, 160 "VARBYTE": TokenType.VARBINARY, 161 } -162 -163 # Redshift allows # to appear as a table identifier prefix -164 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() -165 SINGLE_TOKENS.pop("#") +162 KEYWORDS.pop("VALUES") +163 +164 # Redshift allows # to appear as a table identifier prefix +165 SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy() +166 SINGLE_TOKENS.pop("#")
    @@ -1426,7 +1430,7 @@ Default: 3
    KEYWORDS = - {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': 
<TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': 
<TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': 
<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, '~': <TokenType.RLIKE: 'RLIKE'>, '@@': <TokenType.DAT: 'DAT'>, '@>': <TokenType.AT_GT: 'AT_GT'>, '<@': <TokenType.LT_AT: 'LT_AT'>, '|/': <TokenType.PIPE_SLASH: 'PIPE_SLASH'>, '||/': <TokenType.DPIPE_SLASH: 'DPIPE_SLASH'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BIGSERIAL': <TokenType.BIGSERIAL: 'BIGSERIAL'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CONSTRAINT TRIGGER': <TokenType.COMMAND: 'COMMAND'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'DO': <TokenType.COMMAND: 'COMMAND'>, 'EXEC': <TokenType.COMMAND: 'COMMAND'>, 'HSTORE': <TokenType.HSTORE: 'HSTORE'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'MONEY': <TokenType.MONEY: 'MONEY'>, 'REFRESH': <TokenType.COMMAND: 'COMMAND'>, 'REINDEX': <TokenType.COMMAND: 'COMMAND'>, 'RESET': <TokenType.COMMAND: 'COMMAND'>, 'REVOKE': <TokenType.COMMAND: 'COMMAND'>, 'SERIAL': <TokenType.SERIAL: 'SERIAL'>, 'SMALLSERIAL': <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, 'CSTRING': <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, 'OID': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'OPERATOR': <TokenType.OPERATOR: 'OPERATOR'>, 'REGCLASS': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCOLLATION': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCONFIG': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGDICTIONARY': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGNAMESPACE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPER': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPERATOR': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROC': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROCEDURE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGROLE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGTYPE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'HLLSKETCH': <TokenType.HLLSKETCH: 'HLLSKETCH'>, 'SUPER': <TokenType.SUPER: 'SUPER'>, 'TOP': <TokenType.TOP: 'TOP'>, 'UNLOAD': <TokenType.COMMAND: 'COMMAND'>, 'VARBYTE': <TokenType.VARBINARY: 'VARBINARY'>} + {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 
'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 
'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': 
<TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, '~': <TokenType.RLIKE: 'RLIKE'>, '@@': <TokenType.DAT: 'DAT'>, '@>': <TokenType.AT_GT: 'AT_GT'>, '<@': <TokenType.LT_AT: 'LT_AT'>, '|/': <TokenType.PIPE_SLASH: 'PIPE_SLASH'>, '||/': <TokenType.DPIPE_SLASH: 'DPIPE_SLASH'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BIGSERIAL': <TokenType.BIGSERIAL: 'BIGSERIAL'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CONSTRAINT TRIGGER': <TokenType.COMMAND: 'COMMAND'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'DO': <TokenType.COMMAND: 'COMMAND'>, 'EXEC': <TokenType.COMMAND: 'COMMAND'>, 'HSTORE': <TokenType.HSTORE: 'HSTORE'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'MONEY': <TokenType.MONEY: 'MONEY'>, 'REFRESH': <TokenType.COMMAND: 'COMMAND'>, 'REINDEX': <TokenType.COMMAND: 'COMMAND'>, 'RESET': <TokenType.COMMAND: 'COMMAND'>, 'REVOKE': <TokenType.COMMAND: 'COMMAND'>, 'SERIAL': <TokenType.SERIAL: 'SERIAL'>, 'SMALLSERIAL': <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, 'CSTRING': <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, 'OID': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'OPERATOR': <TokenType.OPERATOR: 'OPERATOR'>, 'REGCLASS': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCOLLATION': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCONFIG': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGDICTIONARY': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGNAMESPACE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPER': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPERATOR': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROC': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROCEDURE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGROLE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGTYPE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'HLLSKETCH': <TokenType.HLLSKETCH: 'HLLSKETCH'>, 'SUPER': <TokenType.SUPER: 'SUPER'>, 'TOP': <TokenType.TOP: 'TOP'>, 'UNLOAD': <TokenType.COMMAND: 'COMMAND'>, 'VARBYTE': <TokenType.VARBINARY: 'VARBINARY'>}
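The only semantic difference between the two dictionaries above is that the new one no longer maps 'VALUES' to TokenType.VALUES; everything else is unchanged. A small sketch of the resulting class attributes, assuming the module paths sqlglot.dialects.postgres and sqlglot.dialects.redshift:

    from sqlglot.dialects.postgres import Postgres
    from sqlglot.dialects.redshift import Redshift

    # Postgres still tokenizes VALUES as a keyword; Redshift pops it
    # (see KEYWORDS.pop("VALUES") above).
    assert "VALUES" in Postgres.Tokenizer.KEYWORDS
    assert "VALUES" not in Redshift.Tokenizer.KEYWORDS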
    @@ -1495,91 +1499,91 @@ Default: 3
-167    class Generator(Postgres.Generator):
    -168        LOCKING_READS_SUPPORTED = False
    -169        QUERY_HINTS = False
    -170        VALUES_AS_TABLE = False
    -171        TZ_TO_WITH_TIME_ZONE = True
    -172        NVL2_SUPPORTED = True
    -173        LAST_DAY_SUPPORTS_DATE_PART = False
    -174
    -175        TYPE_MAPPING = {
    -176            **Postgres.Generator.TYPE_MAPPING,
    -177            exp.DataType.Type.BINARY: "VARBYTE",
    -178            exp.DataType.Type.INT: "INTEGER",
    -179            exp.DataType.Type.TIMETZ: "TIME",
    -180            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    -181            exp.DataType.Type.VARBINARY: "VARBYTE",
    -182        }
    -183
    -184        TRANSFORMS = {
    -185            **Postgres.Generator.TRANSFORMS,
    -186            exp.Concat: concat_to_dpipe_sql,
    -187            exp.ConcatWs: concat_ws_to_dpipe_sql,
    -188            exp.ApproxDistinct: lambda self,
    -189            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
    -190            exp.CurrentTimestamp: lambda self, e: (
    -191                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
    -192            ),
    -193            exp.DateAdd: date_delta_sql("DATEADD"),
    -194            exp.DateDiff: date_delta_sql("DATEDIFF"),
    -195            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    -196            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    -197            exp.FromBase: rename_func("STRTOL"),
    -198            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
    -199            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
    -200            exp.GroupConcat: rename_func("LISTAGG"),
    -201            exp.ParseJSON: rename_func("JSON_PARSE"),
    -202            exp.Select: transforms.preprocess(
    -203                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
    -204            ),
    -205            exp.SortKeyProperty: lambda self,
    -206            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    -207            exp.TableSample: no_tablesample_sql,
    -208            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
    -209            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
    -210        }
    -211
    -212        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    -213        TRANSFORMS.pop(exp.Pivot)
    -214
    -215        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    -216        TRANSFORMS.pop(exp.Pow)
    -217
    -218        # Redshift supports ANY_VALUE(..)
    -219        TRANSFORMS.pop(exp.AnyValue)
    -220
    -221        # Redshift supports LAST_DAY(..)
    -222        TRANSFORMS.pop(exp.LastDay)
    -223
    -224        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    -225
    -226        def with_properties(self, properties: exp.Properties) -> str:
    -227            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    -228            return self.properties(properties, prefix=" ", suffix="")
    -229
    -230        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -231            if expression.is_type(exp.DataType.Type.JSON):
    -232                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
    -233                return self.sql(expression, "this")
    -234
    -235            return super().cast_sql(expression, safe_prefix=safe_prefix)
    -236
    -237        def datatype_sql(self, expression: exp.DataType) -> str:
    -238            """
    -239            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    -240            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    -241            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    -242            `TEXT` to `VARCHAR`.
    -243            """
    -244            if expression.is_type("text"):
    -245                expression.set("this", exp.DataType.Type.VARCHAR)
    -246                precision = expression.args.get("expressions")
    -247
    -248                if not precision:
    -249                    expression.append("expressions", exp.var("MAX"))
    -250
    -251            return super().datatype_sql(expression)
+168    class Generator(Postgres.Generator):
    +169        LOCKING_READS_SUPPORTED = False
    +170        QUERY_HINTS = False
    +171        VALUES_AS_TABLE = False
    +172        TZ_TO_WITH_TIME_ZONE = True
    +173        NVL2_SUPPORTED = True
    +174        LAST_DAY_SUPPORTS_DATE_PART = False
    +175
    +176        TYPE_MAPPING = {
    +177            **Postgres.Generator.TYPE_MAPPING,
    +178            exp.DataType.Type.BINARY: "VARBYTE",
    +179            exp.DataType.Type.INT: "INTEGER",
    +180            exp.DataType.Type.TIMETZ: "TIME",
    +181            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
    +182            exp.DataType.Type.VARBINARY: "VARBYTE",
    +183        }
    +184
    +185        TRANSFORMS = {
    +186            **Postgres.Generator.TRANSFORMS,
    +187            exp.Concat: concat_to_dpipe_sql,
    +188            exp.ConcatWs: concat_ws_to_dpipe_sql,
    +189            exp.ApproxDistinct: lambda self,
    +190            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
    +191            exp.CurrentTimestamp: lambda self, e: (
    +192                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
    +193            ),
    +194            exp.DateAdd: date_delta_sql("DATEADD"),
    +195            exp.DateDiff: date_delta_sql("DATEDIFF"),
    +196            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
    +197            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
    +198            exp.FromBase: rename_func("STRTOL"),
    +199            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
    +200            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
    +201            exp.GroupConcat: rename_func("LISTAGG"),
    +202            exp.ParseJSON: rename_func("JSON_PARSE"),
    +203            exp.Select: transforms.preprocess(
    +204                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
    +205            ),
    +206            exp.SortKeyProperty: lambda self,
    +207            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
    +208            exp.TableSample: no_tablesample_sql,
    +209            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
    +210            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
    +211        }
    +212
    +213        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift support pivots
    +214        TRANSFORMS.pop(exp.Pivot)
    +215
    +216        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    +217        TRANSFORMS.pop(exp.Pow)
    +218
    +219        # Redshift supports ANY_VALUE(..)
    +220        TRANSFORMS.pop(exp.AnyValue)
    +221
    +222        # Redshift supports LAST_DAY(..)
    +223        TRANSFORMS.pop(exp.LastDay)
    +224
    +225        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}
    +226
    +227        def with_properties(self, properties: exp.Properties) -> str:
    +228            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    +229            return self.properties(properties, prefix=" ", suffix="")
    +230
    +231        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +232            if expression.is_type(exp.DataType.Type.JSON):
    +233                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
    +234                return self.sql(expression, "this")
    +235
    +236            return super().cast_sql(expression, safe_prefix=safe_prefix)
    +237
    +238        def datatype_sql(self, expression: exp.DataType) -> str:
    +239            """
    +240            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    +241            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    +242            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    +243            `TEXT` to `VARCHAR`.
    +244            """
    +245            if expression.is_type("text"):
    +246                expression.set("this", exp.DataType.Type.VARCHAR)
    +247                precision = expression.args.get("expressions")
    +248
    +249                if not precision:
    +250                    expression.append("expressions", exp.var("MAX"))
    +251
    +252            return super().datatype_sql(expression)
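Taken together, the TRANSFORMS entries above determine most of the Redshift-specific SQL that gets generated. A rough sketch of two of them, assuming sqlglot.transpile and the "presto" and "redshift" dialect names; the exact whitespace of the output may vary by version:

    import sqlglot

    # exp.ApproxDistinct is rendered with Redshift's APPROXIMATE COUNT(DISTINCT ...) form.
    print(sqlglot.transpile("SELECT APPROX_DISTINCT(x) FROM t", read="presto", write="redshift")[0])
    # expected: SELECT APPROXIMATE COUNT(DISTINCT x) FROM t

    # exp.CurrentTimestamp without the transaction flag becomes GETDATE().
    print(sqlglot.transpile("SELECT CURRENT_TIMESTAMP", write="redshift")[0])
    # expected: SELECT GETDATE()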
     
    @@ -1723,7 +1727,7 @@ Default: True
    RESERVED_KEYWORDS = -{'type', 'snapshot'} +{'snapshot', 'type'}
    @@ -1743,9 +1747,9 @@ Default: True
-226        def with_properties(self, properties: exp.Properties) -> str:
    -227            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    -228            return self.properties(properties, prefix=" ", suffix="")
+227        def with_properties(self, properties: exp.Properties) -> str:
    +228            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
    +229            return self.properties(properties, prefix=" ", suffix="")
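with_properties itself only drops the WITH wrapper; the property transforms listed earlier (DistKeyProperty, SortKeyProperty, DistStyleProperty) then emit the usual Redshift table attributes directly after the column list. A hedged sketch, assuming the generic property parsers accept this input; the table and column names are illustrative:

    import sqlglot

    sql = "CREATE TABLE t (a INT) DISTKEY(a) SORTKEY(a)"
    # expected to round-trip with the properties appended bare, not wrapped in WITH (...)
    print(sqlglot.transpile(sql, read="redshift", write="redshift")[0])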
     
    @@ -1765,12 +1769,12 @@ Default: True
-230        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -231            if expression.is_type(exp.DataType.Type.JSON):
    -232                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
    -233                return self.sql(expression, "this")
    -234
    -235            return super().cast_sql(expression, safe_prefix=safe_prefix)
+231        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +232            if expression.is_type(exp.DataType.Type.JSON):
    +233                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
    +234                return self.sql(expression, "this")
    +235
    +236            return super().cast_sql(expression, safe_prefix=safe_prefix)
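Per cast_sql above, a cast to JSON is simply dropped rather than emitted. A small sketch, assuming sqlglot.transpile; the column name payload is illustrative:

    import sqlglot

    # The CAST wrapper disappears because Redshift has no JSON type to cast to.
    print(sqlglot.transpile("SELECT CAST(payload AS JSON) FROM t", write="redshift")[0])
    # expected: SELECT payload FROM t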
     
    @@ -1788,21 +1792,21 @@ Default: True
-237        def datatype_sql(self, expression: exp.DataType) -> str:
    -238            """
    -239            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    -240            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    -241            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    -242            `TEXT` to `VARCHAR`.
    -243            """
    -244            if expression.is_type("text"):
    -245                expression.set("this", exp.DataType.Type.VARCHAR)
    -246                precision = expression.args.get("expressions")
    -247
    -248                if not precision:
    -249                    expression.append("expressions", exp.var("MAX"))
    -250
    -251            return super().datatype_sql(expression)
+238        def datatype_sql(self, expression: exp.DataType) -> str:
    +239            """
    +240            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    +241            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    +242            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    +243            `TEXT` to `VARCHAR`.
    +244            """
    +245            if expression.is_type("text"):
    +246                expression.set("this", exp.DataType.Type.VARCHAR)
    +247                precision = expression.args.get("expressions")
    +248
    +249                if not precision:
    +250                    expression.append("expressions", exp.var("MAX"))
    +251
    +252            return super().datatype_sql(expression)
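And datatype_sql above is what turns a bare TEXT into VARCHAR(MAX), while a TEXT that already carries a precision is emitted as a plain VARCHAR. A minimal sketch under the same assumptions as the earlier snippets:

    import sqlglot

    # TEXT without precision picks up an explicit MAX precision.
    print(sqlglot.transpile("CREATE TABLE t (c TEXT)", write="redshift")[0])
    # expected: CREATE TABLE t (c VARCHAR(MAX))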
     
    diff --git a/docs/sqlglot/dialects/snowflake.html b/docs/sqlglot/dialects/snowflake.html index 4f45f33..bd0c719 100644 --- a/docs/sqlglot/dialects/snowflake.html +++ b/docs/sqlglot/dialects/snowflake.html @@ -2674,7 +2674,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, 
<TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, 
<TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -2713,7 +2713,7 @@ Default: 3
    TIMESTAMPS = - {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>} + {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>}
    @@ -2790,7 +2790,7 @@ Default: 3
    STAGED_FILE_SINGLE_TOKENS = -{<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>} +{<TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>}
    @@ -2916,6 +2916,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -3078,7 +3079,7 @@ Default: 3
    COMMANDS = -{<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>} +{<TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>}
    @@ -3587,7 +3588,7 @@ Default: True diff --git a/docs/sqlglot/dialects/spark.html b/docs/sqlglot/dialects/spark.html index ac5ac2d..528d1ca 100644 --- a/docs/sqlglot/dialects/spark.html +++ b/docs/sqlglot/dialects/spark.html @@ -955,6 +955,7 @@ Default: 3
    sqlglot.dialects.hive.Hive.Parser
    LOG_DEFAULTS_TO_LN
    STRICT_CAST
    +
    VALUES_FOLLOWED_BY_PAREN
    NO_PAREN_FUNCTION_PARSERS
    PROPERTY_PARSERS
    diff --git a/docs/sqlglot/dialects/spark2.html b/docs/sqlglot/dialects/spark2.html index d45f6c6..581201c 100644 --- a/docs/sqlglot/dialects/spark2.html +++ b/docs/sqlglot/dialects/spark2.html @@ -1279,6 +1279,7 @@ Default: 3
    sqlglot.dialects.hive.Hive.Parser
    LOG_DEFAULTS_TO_LN
    STRICT_CAST
    +
    VALUES_FOLLOWED_BY_PAREN
    NO_PAREN_FUNCTION_PARSERS
    PROPERTY_PARSERS
    diff --git a/docs/sqlglot/dialects/sqlite.html b/docs/sqlglot/dialects/sqlite.html index 8d7e256..8dd8299 100644 --- a/docs/sqlglot/dialects/sqlite.html +++ b/docs/sqlglot/dialects/sqlite.html @@ -1126,7 +1126,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -1246,6 +1246,7 @@ Default: 3
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    NO_PAREN_IF_COMMANDS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -1537,7 +1538,7 @@ Default: True diff --git a/docs/sqlglot/dialects/starrocks.html b/docs/sqlglot/dialects/starrocks.html index 67c6a19..452d39e 100644 --- a/docs/sqlglot/dialects/starrocks.html +++ b/docs/sqlglot/dialects/starrocks.html @@ -636,7 +636,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, 
<TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TRUE: 'TRUE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -776,6 +776,7 @@ Default: 3
    ENUM_TYPE_TOKENS
    LOG_DEFAULTS_TO_LN
    STRING_ALIASES
    +
    VALUES_FOLLOWED_BY_PAREN
    diff --git a/docs/sqlglot/dialects/tableau.html b/docs/sqlglot/dialects/tableau.html index 6984f32..b64f84a 100644 --- a/docs/sqlglot/dialects/tableau.html +++ b/docs/sqlglot/dialects/tableau.html @@ -1410,6 +1410,7 @@ Default: 3
    STRING_ALIASES
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    diff --git a/docs/sqlglot/dialects/teradata.html b/docs/sqlglot/dialects/teradata.html index 128d3c1..32598f0 100644 --- a/docs/sqlglot/dialects/teradata.html +++ b/docs/sqlglot/dialects/teradata.html @@ -60,6 +60,9 @@
  • TABLESAMPLE_CSV
  • +
  • + VALUES_FOLLOWED_BY_PAREN +
  • CHARSET_TRANSLATORS
  • @@ -312,198 +315,199 @@ 74 75 class Parser(parser.Parser): 76 TABLESAMPLE_CSV = True - 77 - 78 CHARSET_TRANSLATORS = { - 79 "GRAPHIC_TO_KANJISJIS", - 80 "GRAPHIC_TO_LATIN", - 81 "GRAPHIC_TO_UNICODE", - 82 "GRAPHIC_TO_UNICODE_PadSpace", - 83 "KANJI1_KanjiEBCDIC_TO_UNICODE", - 84 "KANJI1_KanjiEUC_TO_UNICODE", - 85 "KANJI1_KANJISJIS_TO_UNICODE", - 86 "KANJI1_SBC_TO_UNICODE", - 87 "KANJISJIS_TO_GRAPHIC", - 88 "KANJISJIS_TO_LATIN", - 89 "KANJISJIS_TO_UNICODE", - 90 "LATIN_TO_GRAPHIC", - 91 "LATIN_TO_KANJISJIS", - 92 "LATIN_TO_UNICODE", - 93 "LOCALE_TO_UNICODE", - 94 "UNICODE_TO_GRAPHIC", - 95 "UNICODE_TO_GRAPHIC_PadGraphic", - 96 "UNICODE_TO_GRAPHIC_VarGraphic", - 97 "UNICODE_TO_KANJI1_KanjiEBCDIC", - 98 "UNICODE_TO_KANJI1_KanjiEUC", - 99 "UNICODE_TO_KANJI1_KANJISJIS", -100 "UNICODE_TO_KANJI1_SBC", -101 "UNICODE_TO_KANJISJIS", -102 "UNICODE_TO_LATIN", -103 "UNICODE_TO_LOCALE", -104 "UNICODE_TO_UNICODE_FoldSpace", -105 "UNICODE_TO_UNICODE_Fullwidth", -106 "UNICODE_TO_UNICODE_Halfwidth", -107 "UNICODE_TO_UNICODE_NFC", -108 "UNICODE_TO_UNICODE_NFD", -109 "UNICODE_TO_UNICODE_NFKC", -110 "UNICODE_TO_UNICODE_NFKD", -111 } -112 -113 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS} -114 FUNC_TOKENS.remove(TokenType.REPLACE) -115 -116 STATEMENT_PARSERS = { -117 **parser.Parser.STATEMENT_PARSERS, -118 TokenType.DATABASE: lambda self: self.expression( -119 exp.Use, this=self._parse_table(schema=False) -120 ), -121 TokenType.REPLACE: lambda self: self._parse_create(), -122 } -123 -124 FUNCTION_PARSERS = { -125 **parser.Parser.FUNCTION_PARSERS, -126 # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST -127 "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"], -128 "RANGE_N": lambda self: self._parse_rangen(), -129 "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST), -130 } -131 -132 EXPONENT = { -133 TokenType.DSTAR: exp.Pow, -134 } -135 -136 def _parse_translate(self, strict: bool) -> exp.Expression: -137 this = self._parse_conjunction() -138 -139 if not self._match(TokenType.USING): -140 self.raise_error("Expected USING in TRANSLATE") -141 -142 if self._match_texts(self.CHARSET_TRANSLATORS): -143 charset_split = self._prev.text.split("_TO_") -144 to = self.expression(exp.CharacterSet, this=charset_split[1]) -145 else: -146 self.raise_error("Expected a character set translator after USING in TRANSLATE") -147 -148 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) -149 -150 # FROM before SET in Teradata UPDATE syntax -151 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause -152 def _parse_update(self) -> exp.Update: -153 return self.expression( -154 exp.Update, -155 **{ # type: ignore -156 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), -157 "from": self._parse_from(joins=True), -158 "expressions": self._match(TokenType.SET) -159 and self._parse_csv(self._parse_equality), -160 "where": self._parse_where(), -161 }, -162 ) -163 -164 def _parse_rangen(self): -165 this = self._parse_id_var() -166 self._match(TokenType.BETWEEN) -167 -168 expressions = self._parse_csv(self._parse_conjunction) -169 each = self._match_text_seq("EACH") and self._parse_conjunction() -170 -171 return self.expression(exp.RangeN, this=this, expressions=expressions, each=each) -172 -173 class Generator(generator.Generator): -174 LIMIT_IS_TOP = True -175 JOIN_HINTS = False -176 TABLE_HINTS = 
False -177 QUERY_HINTS = False -178 TABLESAMPLE_KEYWORDS = "SAMPLE" -179 LAST_DAY_SUPPORTS_DATE_PART = False -180 -181 TYPE_MAPPING = { -182 **generator.Generator.TYPE_MAPPING, -183 exp.DataType.Type.GEOMETRY: "ST_GEOMETRY", -184 } -185 -186 PROPERTIES_LOCATION = { -187 **generator.Generator.PROPERTIES_LOCATION, -188 exp.OnCommitProperty: exp.Properties.Location.POST_INDEX, -189 exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION, -190 exp.StabilityProperty: exp.Properties.Location.POST_CREATE, -191 } -192 -193 TRANSFORMS = { -194 **generator.Generator.TRANSFORMS, -195 exp.ArgMax: rename_func("MAX_BY"), -196 exp.ArgMin: rename_func("MIN_BY"), -197 exp.Max: max_or_greatest, -198 exp.Min: min_or_least, -199 exp.Pow: lambda self, e: self.binary(e, "**"), -200 exp.Select: transforms.preprocess( -201 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] -202 ), -203 exp.StrToDate: lambda self, -204 e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", -205 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -206 exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}", -207 } -208 -209 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: -210 if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"): -211 # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>) -212 expression.to.pop() -213 -214 return super().cast_sql(expression, safe_prefix=safe_prefix) -215 -216 def trycast_sql(self, expression: exp.TryCast) -> str: -217 return self.cast_sql(expression, safe_prefix="TRY") -218 -219 def tablesample_sql( -220 self, -221 expression: exp.TableSample, -222 sep: str = " AS ", -223 tablesample_keyword: t.Optional[str] = None, -224 ) -> str: -225 return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}" -226 -227 def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str: -228 return f"PARTITION BY {self.sql(expression, 'this')}" -229 -230 # FROM before SET in Teradata UPDATE syntax -231 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause -232 def update_sql(self, expression: exp.Update) -> str: -233 this = self.sql(expression, "this") -234 from_sql = self.sql(expression, "from") -235 set_sql = self.expressions(expression, flat=True) -236 where_sql = self.sql(expression, "where") -237 sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}" -238 return self.prepend_ctes(expression, sql) -239 -240 def mod_sql(self, expression: exp.Mod) -> str: -241 return self.binary(expression, "MOD") -242 -243 def datatype_sql(self, expression: exp.DataType) -> str: -244 type_sql = super().datatype_sql(expression) -245 prefix_sql = expression.args.get("prefix") -246 return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql -247 -248 def rangen_sql(self, expression: exp.RangeN) -> str: -249 this = self.sql(expression, "this") -250 expressions_sql = self.expressions(expression) -251 each_sql = self.sql(expression, "each") -252 each_sql = f" EACH {each_sql}" if each_sql else "" -253 -254 return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})" -255 -256 def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str: -257 kind = self.sql(expression, "kind").upper() -258 if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME): -259 this_name = 
self.sql(expression.this, "this") -260 this_properties = self.properties( -261 exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]), -262 wrapped=False, -263 prefix=",", -264 ) -265 this_schema = self.schema_columns_sql(expression.this) -266 return f"{this_name}{this_properties}{self.sep()}{this_schema}" -267 -268 return super().createable_sql(expression, locations) + 77 VALUES_FOLLOWED_BY_PAREN = False + 78 + 79 CHARSET_TRANSLATORS = { + 80 "GRAPHIC_TO_KANJISJIS", + 81 "GRAPHIC_TO_LATIN", + 82 "GRAPHIC_TO_UNICODE", + 83 "GRAPHIC_TO_UNICODE_PadSpace", + 84 "KANJI1_KanjiEBCDIC_TO_UNICODE", + 85 "KANJI1_KanjiEUC_TO_UNICODE", + 86 "KANJI1_KANJISJIS_TO_UNICODE", + 87 "KANJI1_SBC_TO_UNICODE", + 88 "KANJISJIS_TO_GRAPHIC", + 89 "KANJISJIS_TO_LATIN", + 90 "KANJISJIS_TO_UNICODE", + 91 "LATIN_TO_GRAPHIC", + 92 "LATIN_TO_KANJISJIS", + 93 "LATIN_TO_UNICODE", + 94 "LOCALE_TO_UNICODE", + 95 "UNICODE_TO_GRAPHIC", + 96 "UNICODE_TO_GRAPHIC_PadGraphic", + 97 "UNICODE_TO_GRAPHIC_VarGraphic", + 98 "UNICODE_TO_KANJI1_KanjiEBCDIC", + 99 "UNICODE_TO_KANJI1_KanjiEUC", +100 "UNICODE_TO_KANJI1_KANJISJIS", +101 "UNICODE_TO_KANJI1_SBC", +102 "UNICODE_TO_KANJISJIS", +103 "UNICODE_TO_LATIN", +104 "UNICODE_TO_LOCALE", +105 "UNICODE_TO_UNICODE_FoldSpace", +106 "UNICODE_TO_UNICODE_Fullwidth", +107 "UNICODE_TO_UNICODE_Halfwidth", +108 "UNICODE_TO_UNICODE_NFC", +109 "UNICODE_TO_UNICODE_NFD", +110 "UNICODE_TO_UNICODE_NFKC", +111 "UNICODE_TO_UNICODE_NFKD", +112 } +113 +114 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS} +115 FUNC_TOKENS.remove(TokenType.REPLACE) +116 +117 STATEMENT_PARSERS = { +118 **parser.Parser.STATEMENT_PARSERS, +119 TokenType.DATABASE: lambda self: self.expression( +120 exp.Use, this=self._parse_table(schema=False) +121 ), +122 TokenType.REPLACE: lambda self: self._parse_create(), +123 } +124 +125 FUNCTION_PARSERS = { +126 **parser.Parser.FUNCTION_PARSERS, +127 # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST +128 "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"], +129 "RANGE_N": lambda self: self._parse_rangen(), +130 "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST), +131 } +132 +133 EXPONENT = { +134 TokenType.DSTAR: exp.Pow, +135 } +136 +137 def _parse_translate(self, strict: bool) -> exp.Expression: +138 this = self._parse_conjunction() +139 +140 if not self._match(TokenType.USING): +141 self.raise_error("Expected USING in TRANSLATE") +142 +143 if self._match_texts(self.CHARSET_TRANSLATORS): +144 charset_split = self._prev.text.split("_TO_") +145 to = self.expression(exp.CharacterSet, this=charset_split[1]) +146 else: +147 self.raise_error("Expected a character set translator after USING in TRANSLATE") +148 +149 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) +150 +151 # FROM before SET in Teradata UPDATE syntax +152 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause +153 def _parse_update(self) -> exp.Update: +154 return self.expression( +155 exp.Update, +156 **{ # type: ignore +157 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), +158 "from": self._parse_from(joins=True), +159 "expressions": self._match(TokenType.SET) +160 and self._parse_csv(self._parse_equality), +161 "where": self._parse_where(), +162 }, +163 ) +164 +165 def _parse_rangen(self): +166 this = self._parse_id_var() +167 self._match(TokenType.BETWEEN) +168 
+169 expressions = self._parse_csv(self._parse_conjunction) +170 each = self._match_text_seq("EACH") and self._parse_conjunction() +171 +172 return self.expression(exp.RangeN, this=this, expressions=expressions, each=each) +173 +174 class Generator(generator.Generator): +175 LIMIT_IS_TOP = True +176 JOIN_HINTS = False +177 TABLE_HINTS = False +178 QUERY_HINTS = False +179 TABLESAMPLE_KEYWORDS = "SAMPLE" +180 LAST_DAY_SUPPORTS_DATE_PART = False +181 +182 TYPE_MAPPING = { +183 **generator.Generator.TYPE_MAPPING, +184 exp.DataType.Type.GEOMETRY: "ST_GEOMETRY", +185 } +186 +187 PROPERTIES_LOCATION = { +188 **generator.Generator.PROPERTIES_LOCATION, +189 exp.OnCommitProperty: exp.Properties.Location.POST_INDEX, +190 exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION, +191 exp.StabilityProperty: exp.Properties.Location.POST_CREATE, +192 } +193 +194 TRANSFORMS = { +195 **generator.Generator.TRANSFORMS, +196 exp.ArgMax: rename_func("MAX_BY"), +197 exp.ArgMin: rename_func("MIN_BY"), +198 exp.Max: max_or_greatest, +199 exp.Min: min_or_least, +200 exp.Pow: lambda self, e: self.binary(e, "**"), +201 exp.Select: transforms.preprocess( +202 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] +203 ), +204 exp.StrToDate: lambda self, +205 e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", +206 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +207 exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}", +208 } +209 +210 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: +211 if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"): +212 # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>) +213 expression.to.pop() +214 +215 return super().cast_sql(expression, safe_prefix=safe_prefix) +216 +217 def trycast_sql(self, expression: exp.TryCast) -> str: +218 return self.cast_sql(expression, safe_prefix="TRY") +219 +220 def tablesample_sql( +221 self, +222 expression: exp.TableSample, +223 sep: str = " AS ", +224 tablesample_keyword: t.Optional[str] = None, +225 ) -> str: +226 return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}" +227 +228 def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str: +229 return f"PARTITION BY {self.sql(expression, 'this')}" +230 +231 # FROM before SET in Teradata UPDATE syntax +232 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause +233 def update_sql(self, expression: exp.Update) -> str: +234 this = self.sql(expression, "this") +235 from_sql = self.sql(expression, "from") +236 set_sql = self.expressions(expression, flat=True) +237 where_sql = self.sql(expression, "where") +238 sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}" +239 return self.prepend_ctes(expression, sql) +240 +241 def mod_sql(self, expression: exp.Mod) -> str: +242 return self.binary(expression, "MOD") +243 +244 def datatype_sql(self, expression: exp.DataType) -> str: +245 type_sql = super().datatype_sql(expression) +246 prefix_sql = expression.args.get("prefix") +247 return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql +248 +249 def rangen_sql(self, expression: exp.RangeN) -> str: +250 this = self.sql(expression, "this") +251 expressions_sql = self.expressions(expression) +252 each_sql = self.sql(expression, "each") +253 each_sql = f" EACH {each_sql}" if 
each_sql else "" +254 +255 return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})" +256 +257 def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str: +258 kind = self.sql(expression, "kind").upper() +259 if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME): +260 this_name = self.sql(expression.this, "this") +261 this_properties = self.properties( +262 exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]), +263 wrapped=False, +264 prefix=",", +265 ) +266 this_schema = self.schema_columns_sql(expression.this) +267 return f"{this_name}{this_properties}{self.sep()}{this_schema}" +268 +269 return super().createable_sql(expression, locations)
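The Teradata parser hunk above introduces VALUES_FOLLOWED_BY_PAREN = False alongside the existing FROM-before-SET UPDATE handling and the TRYCAST alias for TRY_CAST. What follows is an illustrative sketch of how those code paths are reached through the public sqlglot API (sqlglot.transpile, as documented elsewhere in these 21.1.x pages); it is not part of the patched sources, and the printed SQL is indicative only:

import sqlglot

# Teradata places FROM before SET in UPDATE; _parse_update and update_sql
# above preserve that ordering on a Teradata-to-Teradata round trip.
update_stmt = "UPDATE t FROM s SET x = s.x WHERE t.id = s.id"
print(sqlglot.transpile(update_stmt, read="teradata", write="teradata")[0])

# TRYCAST is parsed with the TRY_CAST function parser, so other dialects can
# emit their own safe-cast spelling for it (e.g. TRY_CAST in DuckDB).
print(sqlglot.transpile("SELECT TRYCAST(x AS INT) FROM t", read="teradata", write="duckdb")[0])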
    @@ -586,198 +590,199 @@
    75 76 class Parser(parser.Parser): 77 TABLESAMPLE_CSV = True - 78 - 79 CHARSET_TRANSLATORS = { - 80 "GRAPHIC_TO_KANJISJIS", - 81 "GRAPHIC_TO_LATIN", - 82 "GRAPHIC_TO_UNICODE", - 83 "GRAPHIC_TO_UNICODE_PadSpace", - 84 "KANJI1_KanjiEBCDIC_TO_UNICODE", - 85 "KANJI1_KanjiEUC_TO_UNICODE", - 86 "KANJI1_KANJISJIS_TO_UNICODE", - 87 "KANJI1_SBC_TO_UNICODE", - 88 "KANJISJIS_TO_GRAPHIC", - 89 "KANJISJIS_TO_LATIN", - 90 "KANJISJIS_TO_UNICODE", - 91 "LATIN_TO_GRAPHIC", - 92 "LATIN_TO_KANJISJIS", - 93 "LATIN_TO_UNICODE", - 94 "LOCALE_TO_UNICODE", - 95 "UNICODE_TO_GRAPHIC", - 96 "UNICODE_TO_GRAPHIC_PadGraphic", - 97 "UNICODE_TO_GRAPHIC_VarGraphic", - 98 "UNICODE_TO_KANJI1_KanjiEBCDIC", - 99 "UNICODE_TO_KANJI1_KanjiEUC", -100 "UNICODE_TO_KANJI1_KANJISJIS", -101 "UNICODE_TO_KANJI1_SBC", -102 "UNICODE_TO_KANJISJIS", -103 "UNICODE_TO_LATIN", -104 "UNICODE_TO_LOCALE", -105 "UNICODE_TO_UNICODE_FoldSpace", -106 "UNICODE_TO_UNICODE_Fullwidth", -107 "UNICODE_TO_UNICODE_Halfwidth", -108 "UNICODE_TO_UNICODE_NFC", -109 "UNICODE_TO_UNICODE_NFD", -110 "UNICODE_TO_UNICODE_NFKC", -111 "UNICODE_TO_UNICODE_NFKD", -112 } -113 -114 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS} -115 FUNC_TOKENS.remove(TokenType.REPLACE) -116 -117 STATEMENT_PARSERS = { -118 **parser.Parser.STATEMENT_PARSERS, -119 TokenType.DATABASE: lambda self: self.expression( -120 exp.Use, this=self._parse_table(schema=False) -121 ), -122 TokenType.REPLACE: lambda self: self._parse_create(), -123 } -124 -125 FUNCTION_PARSERS = { -126 **parser.Parser.FUNCTION_PARSERS, -127 # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST -128 "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"], -129 "RANGE_N": lambda self: self._parse_rangen(), -130 "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST), -131 } -132 -133 EXPONENT = { -134 TokenType.DSTAR: exp.Pow, -135 } -136 -137 def _parse_translate(self, strict: bool) -> exp.Expression: -138 this = self._parse_conjunction() -139 -140 if not self._match(TokenType.USING): -141 self.raise_error("Expected USING in TRANSLATE") -142 -143 if self._match_texts(self.CHARSET_TRANSLATORS): -144 charset_split = self._prev.text.split("_TO_") -145 to = self.expression(exp.CharacterSet, this=charset_split[1]) -146 else: -147 self.raise_error("Expected a character set translator after USING in TRANSLATE") -148 -149 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) -150 -151 # FROM before SET in Teradata UPDATE syntax -152 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause -153 def _parse_update(self) -> exp.Update: -154 return self.expression( -155 exp.Update, -156 **{ # type: ignore -157 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), -158 "from": self._parse_from(joins=True), -159 "expressions": self._match(TokenType.SET) -160 and self._parse_csv(self._parse_equality), -161 "where": self._parse_where(), -162 }, -163 ) -164 -165 def _parse_rangen(self): -166 this = self._parse_id_var() -167 self._match(TokenType.BETWEEN) -168 -169 expressions = self._parse_csv(self._parse_conjunction) -170 each = self._match_text_seq("EACH") and self._parse_conjunction() -171 -172 return self.expression(exp.RangeN, this=this, expressions=expressions, each=each) -173 -174 class Generator(generator.Generator): -175 LIMIT_IS_TOP = True -176 JOIN_HINTS = False -177 TABLE_HINTS = False -178 QUERY_HINTS = 
False -179 TABLESAMPLE_KEYWORDS = "SAMPLE" -180 LAST_DAY_SUPPORTS_DATE_PART = False -181 -182 TYPE_MAPPING = { -183 **generator.Generator.TYPE_MAPPING, -184 exp.DataType.Type.GEOMETRY: "ST_GEOMETRY", -185 } -186 -187 PROPERTIES_LOCATION = { -188 **generator.Generator.PROPERTIES_LOCATION, -189 exp.OnCommitProperty: exp.Properties.Location.POST_INDEX, -190 exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION, -191 exp.StabilityProperty: exp.Properties.Location.POST_CREATE, -192 } -193 -194 TRANSFORMS = { -195 **generator.Generator.TRANSFORMS, -196 exp.ArgMax: rename_func("MAX_BY"), -197 exp.ArgMin: rename_func("MIN_BY"), -198 exp.Max: max_or_greatest, -199 exp.Min: min_or_least, -200 exp.Pow: lambda self, e: self.binary(e, "**"), -201 exp.Select: transforms.preprocess( -202 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] -203 ), -204 exp.StrToDate: lambda self, -205 e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", -206 exp.ToChar: lambda self, e: self.function_fallback_sql(e), -207 exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}", -208 } -209 -210 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: -211 if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"): -212 # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>) -213 expression.to.pop() -214 -215 return super().cast_sql(expression, safe_prefix=safe_prefix) -216 -217 def trycast_sql(self, expression: exp.TryCast) -> str: -218 return self.cast_sql(expression, safe_prefix="TRY") -219 -220 def tablesample_sql( -221 self, -222 expression: exp.TableSample, -223 sep: str = " AS ", -224 tablesample_keyword: t.Optional[str] = None, -225 ) -> str: -226 return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}" -227 -228 def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str: -229 return f"PARTITION BY {self.sql(expression, 'this')}" -230 -231 # FROM before SET in Teradata UPDATE syntax -232 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause -233 def update_sql(self, expression: exp.Update) -> str: -234 this = self.sql(expression, "this") -235 from_sql = self.sql(expression, "from") -236 set_sql = self.expressions(expression, flat=True) -237 where_sql = self.sql(expression, "where") -238 sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}" -239 return self.prepend_ctes(expression, sql) -240 -241 def mod_sql(self, expression: exp.Mod) -> str: -242 return self.binary(expression, "MOD") -243 -244 def datatype_sql(self, expression: exp.DataType) -> str: -245 type_sql = super().datatype_sql(expression) -246 prefix_sql = expression.args.get("prefix") -247 return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql -248 -249 def rangen_sql(self, expression: exp.RangeN) -> str: -250 this = self.sql(expression, "this") -251 expressions_sql = self.expressions(expression) -252 each_sql = self.sql(expression, "each") -253 each_sql = f" EACH {each_sql}" if each_sql else "" -254 -255 return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})" -256 -257 def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str: -258 kind = self.sql(expression, "kind").upper() -259 if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME): -260 this_name = self.sql(expression.this, "this") -261 
this_properties = self.properties( -262 exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]), -263 wrapped=False, -264 prefix=",", -265 ) -266 this_schema = self.schema_columns_sql(expression.this) -267 return f"{this_name}{this_properties}{self.sep()}{this_schema}" -268 -269 return super().createable_sql(expression, locations) + 78 VALUES_FOLLOWED_BY_PAREN = False + 79 + 80 CHARSET_TRANSLATORS = { + 81 "GRAPHIC_TO_KANJISJIS", + 82 "GRAPHIC_TO_LATIN", + 83 "GRAPHIC_TO_UNICODE", + 84 "GRAPHIC_TO_UNICODE_PadSpace", + 85 "KANJI1_KanjiEBCDIC_TO_UNICODE", + 86 "KANJI1_KanjiEUC_TO_UNICODE", + 87 "KANJI1_KANJISJIS_TO_UNICODE", + 88 "KANJI1_SBC_TO_UNICODE", + 89 "KANJISJIS_TO_GRAPHIC", + 90 "KANJISJIS_TO_LATIN", + 91 "KANJISJIS_TO_UNICODE", + 92 "LATIN_TO_GRAPHIC", + 93 "LATIN_TO_KANJISJIS", + 94 "LATIN_TO_UNICODE", + 95 "LOCALE_TO_UNICODE", + 96 "UNICODE_TO_GRAPHIC", + 97 "UNICODE_TO_GRAPHIC_PadGraphic", + 98 "UNICODE_TO_GRAPHIC_VarGraphic", + 99 "UNICODE_TO_KANJI1_KanjiEBCDIC", +100 "UNICODE_TO_KANJI1_KanjiEUC", +101 "UNICODE_TO_KANJI1_KANJISJIS", +102 "UNICODE_TO_KANJI1_SBC", +103 "UNICODE_TO_KANJISJIS", +104 "UNICODE_TO_LATIN", +105 "UNICODE_TO_LOCALE", +106 "UNICODE_TO_UNICODE_FoldSpace", +107 "UNICODE_TO_UNICODE_Fullwidth", +108 "UNICODE_TO_UNICODE_Halfwidth", +109 "UNICODE_TO_UNICODE_NFC", +110 "UNICODE_TO_UNICODE_NFD", +111 "UNICODE_TO_UNICODE_NFKC", +112 "UNICODE_TO_UNICODE_NFKD", +113 } +114 +115 FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS} +116 FUNC_TOKENS.remove(TokenType.REPLACE) +117 +118 STATEMENT_PARSERS = { +119 **parser.Parser.STATEMENT_PARSERS, +120 TokenType.DATABASE: lambda self: self.expression( +121 exp.Use, this=self._parse_table(schema=False) +122 ), +123 TokenType.REPLACE: lambda self: self._parse_create(), +124 } +125 +126 FUNCTION_PARSERS = { +127 **parser.Parser.FUNCTION_PARSERS, +128 # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST +129 "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"], +130 "RANGE_N": lambda self: self._parse_rangen(), +131 "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST), +132 } +133 +134 EXPONENT = { +135 TokenType.DSTAR: exp.Pow, +136 } +137 +138 def _parse_translate(self, strict: bool) -> exp.Expression: +139 this = self._parse_conjunction() +140 +141 if not self._match(TokenType.USING): +142 self.raise_error("Expected USING in TRANSLATE") +143 +144 if self._match_texts(self.CHARSET_TRANSLATORS): +145 charset_split = self._prev.text.split("_TO_") +146 to = self.expression(exp.CharacterSet, this=charset_split[1]) +147 else: +148 self.raise_error("Expected a character set translator after USING in TRANSLATE") +149 +150 return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to) +151 +152 # FROM before SET in Teradata UPDATE syntax +153 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause +154 def _parse_update(self) -> exp.Update: +155 return self.expression( +156 exp.Update, +157 **{ # type: ignore +158 "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS), +159 "from": self._parse_from(joins=True), +160 "expressions": self._match(TokenType.SET) +161 and self._parse_csv(self._parse_equality), +162 "where": self._parse_where(), +163 }, +164 ) +165 +166 def _parse_rangen(self): +167 this = self._parse_id_var() +168 self._match(TokenType.BETWEEN) +169 +170 expressions = 
self._parse_csv(self._parse_conjunction) +171 each = self._match_text_seq("EACH") and self._parse_conjunction() +172 +173 return self.expression(exp.RangeN, this=this, expressions=expressions, each=each) +174 +175 class Generator(generator.Generator): +176 LIMIT_IS_TOP = True +177 JOIN_HINTS = False +178 TABLE_HINTS = False +179 QUERY_HINTS = False +180 TABLESAMPLE_KEYWORDS = "SAMPLE" +181 LAST_DAY_SUPPORTS_DATE_PART = False +182 +183 TYPE_MAPPING = { +184 **generator.Generator.TYPE_MAPPING, +185 exp.DataType.Type.GEOMETRY: "ST_GEOMETRY", +186 } +187 +188 PROPERTIES_LOCATION = { +189 **generator.Generator.PROPERTIES_LOCATION, +190 exp.OnCommitProperty: exp.Properties.Location.POST_INDEX, +191 exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION, +192 exp.StabilityProperty: exp.Properties.Location.POST_CREATE, +193 } +194 +195 TRANSFORMS = { +196 **generator.Generator.TRANSFORMS, +197 exp.ArgMax: rename_func("MAX_BY"), +198 exp.ArgMin: rename_func("MIN_BY"), +199 exp.Max: max_or_greatest, +200 exp.Min: min_or_least, +201 exp.Pow: lambda self, e: self.binary(e, "**"), +202 exp.Select: transforms.preprocess( +203 [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins] +204 ), +205 exp.StrToDate: lambda self, +206 e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})", +207 exp.ToChar: lambda self, e: self.function_fallback_sql(e), +208 exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}", +209 } +210 +211 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: +212 if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"): +213 # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>) +214 expression.to.pop() +215 +216 return super().cast_sql(expression, safe_prefix=safe_prefix) +217 +218 def trycast_sql(self, expression: exp.TryCast) -> str: +219 return self.cast_sql(expression, safe_prefix="TRY") +220 +221 def tablesample_sql( +222 self, +223 expression: exp.TableSample, +224 sep: str = " AS ", +225 tablesample_keyword: t.Optional[str] = None, +226 ) -> str: +227 return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}" +228 +229 def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str: +230 return f"PARTITION BY {self.sql(expression, 'this')}" +231 +232 # FROM before SET in Teradata UPDATE syntax +233 # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause +234 def update_sql(self, expression: exp.Update) -> str: +235 this = self.sql(expression, "this") +236 from_sql = self.sql(expression, "from") +237 set_sql = self.expressions(expression, flat=True) +238 where_sql = self.sql(expression, "where") +239 sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}" +240 return self.prepend_ctes(expression, sql) +241 +242 def mod_sql(self, expression: exp.Mod) -> str: +243 return self.binary(expression, "MOD") +244 +245 def datatype_sql(self, expression: exp.DataType) -> str: +246 type_sql = super().datatype_sql(expression) +247 prefix_sql = expression.args.get("prefix") +248 return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql +249 +250 def rangen_sql(self, expression: exp.RangeN) -> str: +251 this = self.sql(expression, "this") +252 expressions_sql = self.expressions(expression) +253 each_sql = self.sql(expression, "each") +254 each_sql = f" EACH {each_sql}" if each_sql else "" +255 
+256 return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})" +257 +258 def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str: +259 kind = self.sql(expression, "kind").upper() +260 if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME): +261 this_name = self.sql(expression.this, "this") +262 this_properties = self.properties( +263 exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]), +264 wrapped=False, +265 prefix=",", +266 ) +267 this_schema = self.schema_columns_sql(expression.this) +268 return f"{this_name}{this_properties}{self.sep()}{this_schema}" +269 +270 return super().createable_sql(expression, locations)
    @@ -1241,101 +1246,102 @@ True means a / b is integer division if both a and
     76    class Parser(parser.Parser):
      77        TABLESAMPLE_CSV = True
    - 78
    - 79        CHARSET_TRANSLATORS = {
    - 80            "GRAPHIC_TO_KANJISJIS",
    - 81            "GRAPHIC_TO_LATIN",
    - 82            "GRAPHIC_TO_UNICODE",
    - 83            "GRAPHIC_TO_UNICODE_PadSpace",
    - 84            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    - 85            "KANJI1_KanjiEUC_TO_UNICODE",
    - 86            "KANJI1_KANJISJIS_TO_UNICODE",
    - 87            "KANJI1_SBC_TO_UNICODE",
    - 88            "KANJISJIS_TO_GRAPHIC",
    - 89            "KANJISJIS_TO_LATIN",
    - 90            "KANJISJIS_TO_UNICODE",
    - 91            "LATIN_TO_GRAPHIC",
    - 92            "LATIN_TO_KANJISJIS",
    - 93            "LATIN_TO_UNICODE",
    - 94            "LOCALE_TO_UNICODE",
    - 95            "UNICODE_TO_GRAPHIC",
    - 96            "UNICODE_TO_GRAPHIC_PadGraphic",
    - 97            "UNICODE_TO_GRAPHIC_VarGraphic",
    - 98            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    - 99            "UNICODE_TO_KANJI1_KanjiEUC",
    -100            "UNICODE_TO_KANJI1_KANJISJIS",
    -101            "UNICODE_TO_KANJI1_SBC",
    -102            "UNICODE_TO_KANJISJIS",
    -103            "UNICODE_TO_LATIN",
    -104            "UNICODE_TO_LOCALE",
    -105            "UNICODE_TO_UNICODE_FoldSpace",
    -106            "UNICODE_TO_UNICODE_Fullwidth",
    -107            "UNICODE_TO_UNICODE_Halfwidth",
    -108            "UNICODE_TO_UNICODE_NFC",
    -109            "UNICODE_TO_UNICODE_NFD",
    -110            "UNICODE_TO_UNICODE_NFKC",
    -111            "UNICODE_TO_UNICODE_NFKD",
    -112        }
    -113
    -114        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    -115        FUNC_TOKENS.remove(TokenType.REPLACE)
    -116
    -117        STATEMENT_PARSERS = {
    -118            **parser.Parser.STATEMENT_PARSERS,
    -119            TokenType.DATABASE: lambda self: self.expression(
    -120                exp.Use, this=self._parse_table(schema=False)
    -121            ),
    -122            TokenType.REPLACE: lambda self: self._parse_create(),
    -123        }
    -124
    -125        FUNCTION_PARSERS = {
    -126            **parser.Parser.FUNCTION_PARSERS,
    -127            # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST
    -128            "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"],
    -129            "RANGE_N": lambda self: self._parse_rangen(),
    -130            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    -131        }
    -132
    -133        EXPONENT = {
    -134            TokenType.DSTAR: exp.Pow,
    -135        }
    -136
    -137        def _parse_translate(self, strict: bool) -> exp.Expression:
    -138            this = self._parse_conjunction()
    -139
    -140            if not self._match(TokenType.USING):
    -141                self.raise_error("Expected USING in TRANSLATE")
    -142
    -143            if self._match_texts(self.CHARSET_TRANSLATORS):
    -144                charset_split = self._prev.text.split("_TO_")
    -145                to = self.expression(exp.CharacterSet, this=charset_split[1])
    -146            else:
    -147                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    -148
    -149            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
    -150
    -151        # FROM before SET in Teradata UPDATE syntax
    -152        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -153        def _parse_update(self) -> exp.Update:
    -154            return self.expression(
    -155                exp.Update,
    -156                **{  # type: ignore
    -157                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    -158                    "from": self._parse_from(joins=True),
    -159                    "expressions": self._match(TokenType.SET)
    -160                    and self._parse_csv(self._parse_equality),
    -161                    "where": self._parse_where(),
    -162                },
    -163            )
    -164
    -165        def _parse_rangen(self):
    -166            this = self._parse_id_var()
    -167            self._match(TokenType.BETWEEN)
    -168
    -169            expressions = self._parse_csv(self._parse_conjunction)
    -170            each = self._match_text_seq("EACH") and self._parse_conjunction()
    -171
    -172            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
    + 78        VALUES_FOLLOWED_BY_PAREN = False
    + 79
    + 80        CHARSET_TRANSLATORS = {
    + 81            "GRAPHIC_TO_KANJISJIS",
    + 82            "GRAPHIC_TO_LATIN",
    + 83            "GRAPHIC_TO_UNICODE",
    + 84            "GRAPHIC_TO_UNICODE_PadSpace",
    + 85            "KANJI1_KanjiEBCDIC_TO_UNICODE",
    + 86            "KANJI1_KanjiEUC_TO_UNICODE",
    + 87            "KANJI1_KANJISJIS_TO_UNICODE",
    + 88            "KANJI1_SBC_TO_UNICODE",
    + 89            "KANJISJIS_TO_GRAPHIC",
    + 90            "KANJISJIS_TO_LATIN",
    + 91            "KANJISJIS_TO_UNICODE",
    + 92            "LATIN_TO_GRAPHIC",
    + 93            "LATIN_TO_KANJISJIS",
    + 94            "LATIN_TO_UNICODE",
    + 95            "LOCALE_TO_UNICODE",
    + 96            "UNICODE_TO_GRAPHIC",
    + 97            "UNICODE_TO_GRAPHIC_PadGraphic",
    + 98            "UNICODE_TO_GRAPHIC_VarGraphic",
    + 99            "UNICODE_TO_KANJI1_KanjiEBCDIC",
    +100            "UNICODE_TO_KANJI1_KanjiEUC",
    +101            "UNICODE_TO_KANJI1_KANJISJIS",
    +102            "UNICODE_TO_KANJI1_SBC",
    +103            "UNICODE_TO_KANJISJIS",
    +104            "UNICODE_TO_LATIN",
    +105            "UNICODE_TO_LOCALE",
    +106            "UNICODE_TO_UNICODE_FoldSpace",
    +107            "UNICODE_TO_UNICODE_Fullwidth",
    +108            "UNICODE_TO_UNICODE_Halfwidth",
    +109            "UNICODE_TO_UNICODE_NFC",
    +110            "UNICODE_TO_UNICODE_NFD",
    +111            "UNICODE_TO_UNICODE_NFKC",
    +112            "UNICODE_TO_UNICODE_NFKD",
    +113        }
    +114
    +115        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
    +116        FUNC_TOKENS.remove(TokenType.REPLACE)
    +117
    +118        STATEMENT_PARSERS = {
    +119            **parser.Parser.STATEMENT_PARSERS,
    +120            TokenType.DATABASE: lambda self: self.expression(
    +121                exp.Use, this=self._parse_table(schema=False)
    +122            ),
    +123            TokenType.REPLACE: lambda self: self._parse_create(),
    +124        }
    +125
    +126        FUNCTION_PARSERS = {
    +127            **parser.Parser.FUNCTION_PARSERS,
    +128            # https://docs.teradata.com/r/SQL-Functions-Operators-Expressions-and-Predicates/June-2017/Data-Type-Conversions/TRYCAST
    +129            "TRYCAST": parser.Parser.FUNCTION_PARSERS["TRY_CAST"],
    +130            "RANGE_N": lambda self: self._parse_rangen(),
    +131            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
    +132        }
    +133
    +134        EXPONENT = {
    +135            TokenType.DSTAR: exp.Pow,
    +136        }
    +137
    +138        def _parse_translate(self, strict: bool) -> exp.Expression:
    +139            this = self._parse_conjunction()
    +140
    +141            if not self._match(TokenType.USING):
    +142                self.raise_error("Expected USING in TRANSLATE")
    +143
    +144            if self._match_texts(self.CHARSET_TRANSLATORS):
    +145                charset_split = self._prev.text.split("_TO_")
    +146                to = self.expression(exp.CharacterSet, this=charset_split[1])
    +147            else:
    +148                self.raise_error("Expected a character set translator after USING in TRANSLATE")
    +149
    +150            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
    +151
    +152        # FROM before SET in Teradata UPDATE syntax
    +153        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +154        def _parse_update(self) -> exp.Update:
    +155            return self.expression(
    +156                exp.Update,
    +157                **{  # type: ignore
    +158                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
    +159                    "from": self._parse_from(joins=True),
    +160                    "expressions": self._match(TokenType.SET)
    +161                    and self._parse_csv(self._parse_equality),
    +162                    "where": self._parse_where(),
    +163                },
    +164            )
    +165
    +166        def _parse_rangen(self):
    +167            this = self._parse_id_var()
    +168            self._match(TokenType.BETWEEN)
    +169
    +170            expressions = self._parse_csv(self._parse_conjunction)
    +171            each = self._match_text_seq("EACH") and self._parse_conjunction()
    +172
    +173            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
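
A minimal usage sketch of the parser hooks added above, assuming a recent sqlglot install with the Teradata dialect; the input statements are illustrative, not taken from this patch. TRANSLATE(... USING <translator>) is parsed into a cast to a character set, `**` is wired to exponentiation through the DSTAR entry in EXPONENT, and a leading DATABASE keyword is dispatched by STATEMENT_PARSERS as a USE-style statement.

    import sqlglot
    from sqlglot import exp

    # TRANSLATE(... USING <charset translator>) parses into a cast whose target is a CharacterSet.
    translated = sqlglot.parse_one("SELECT TRANSLATE(x USING LATIN_TO_UNICODE)", read="teradata")
    print(translated.find(exp.Cast))

    # `**` becomes exp.Pow via TokenType.DSTAR.
    print(sqlglot.parse_one("SELECT 2 ** 3", read="teradata").find(exp.Pow))

    # `DATABASE mydb` is parsed as a USE-style statement.
    print(repr(sqlglot.parse_one("DATABASE mydb", read="teradata")))
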
     
    @@ -1367,12 +1373,24 @@ Default: 3 +
    +
    +
    + VALUES_FOLLOWED_BY_PAREN = +False + + +
    + + + +
    CHARSET_TRANSLATORS = - {'KANJI1_SBC_TO_UNICODE', 'UNICODE_TO_LATIN', 'UNICODE_TO_UNICODE_NFC', 'KANJI1_KanjiEUC_TO_UNICODE', 'GRAPHIC_TO_KANJISJIS', 'UNICODE_TO_UNICODE_Fullwidth', 'UNICODE_TO_GRAPHIC_PadGraphic', 'UNICODE_TO_GRAPHIC', 'UNICODE_TO_KANJI1_KanjiEBCDIC', 'KANJI1_KANJISJIS_TO_UNICODE', 'UNICODE_TO_KANJI1_KANJISJIS', 'GRAPHIC_TO_UNICODE', 'GRAPHIC_TO_UNICODE_PadSpace', 'LATIN_TO_GRAPHIC', 'LATIN_TO_UNICODE', 'UNICODE_TO_KANJISJIS', 'UNICODE_TO_GRAPHIC_VarGraphic', 'KANJISJIS_TO_GRAPHIC', 'UNICODE_TO_UNICODE_FoldSpace', 'UNICODE_TO_KANJI1_SBC', 'UNICODE_TO_UNICODE_NFKC', 'KANJI1_KanjiEBCDIC_TO_UNICODE', 'LOCALE_TO_UNICODE', 'KANJISJIS_TO_UNICODE', 'KANJISJIS_TO_LATIN', 'GRAPHIC_TO_LATIN', 'UNICODE_TO_LOCALE', 'UNICODE_TO_UNICODE_NFKD', 'LATIN_TO_KANJISJIS', 'UNICODE_TO_KANJI1_KanjiEUC', 'UNICODE_TO_UNICODE_Halfwidth', 'UNICODE_TO_UNICODE_NFD'} + {'GRAPHIC_TO_KANJISJIS', 'LOCALE_TO_UNICODE', 'KANJI1_KanjiEBCDIC_TO_UNICODE', 'GRAPHIC_TO_LATIN', 'UNICODE_TO_UNICODE_NFC', 'UNICODE_TO_UNICODE_Halfwidth', 'UNICODE_TO_UNICODE_NFKD', 'KANJISJIS_TO_UNICODE', 'LATIN_TO_UNICODE', 'LATIN_TO_GRAPHIC', 'KANJI1_SBC_TO_UNICODE', 'UNICODE_TO_LOCALE', 'GRAPHIC_TO_UNICODE_PadSpace', 'UNICODE_TO_GRAPHIC_VarGraphic', 'UNICODE_TO_UNICODE_Fullwidth', 'UNICODE_TO_GRAPHIC', 'UNICODE_TO_LATIN', 'GRAPHIC_TO_UNICODE', 'KANJISJIS_TO_LATIN', 'KANJI1_KanjiEUC_TO_UNICODE', 'UNICODE_TO_KANJI1_KANJISJIS', 'LATIN_TO_KANJISJIS', 'UNICODE_TO_UNICODE_NFKC', 'KANJI1_KANJISJIS_TO_UNICODE', 'UNICODE_TO_GRAPHIC_PadGraphic', 'UNICODE_TO_KANJI1_KanjiEUC', 'UNICODE_TO_UNICODE_FoldSpace', 'UNICODE_TO_UNICODE_NFD', 'KANJISJIS_TO_GRAPHIC', 'UNICODE_TO_KANJI1_SBC', 'UNICODE_TO_KANJI1_KanjiEBCDIC', 'UNICODE_TO_KANJISJIS'}
    @@ -1385,7 +1403,7 @@ Default: 3
    FUNC_TOKENS = - {<TokenType.RLIKE: 'RLIKE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.ILIKE: 'ILIKE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.XOR: 'XOR'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ANY: 'ANY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.RANGE: 'RANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.NULL: 'NULL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.LIKE: 'LIKE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LEFT: 'LEFT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIME: 'TIME'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATE: 'DATE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, 
<TokenType.INSERT: 'INSERT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.GLOB: 'GLOB'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.MERGE: 'MERGE'>, <TokenType.XML: 'XML'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TIME: 'TIME'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.RLIKE: 'RLIKE'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ENUM: 'ENUM'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.LIKE: 'LIKE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.XOR: 'XOR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ILIKE: 'ILIKE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ALL: 'ALL'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INT256: 'INT256'>, <TokenType.DECIMAL: 
'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.INSERT: 'INSERT'>, <TokenType.GLOB: 'GLOB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.INT128: 'INT128'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>}
    @@ -1436,7 +1454,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -1582,102 +1600,102 @@ Default: 3
    -
    174    class Generator(generator.Generator):
    -175        LIMIT_IS_TOP = True
    -176        JOIN_HINTS = False
    -177        TABLE_HINTS = False
    -178        QUERY_HINTS = False
    -179        TABLESAMPLE_KEYWORDS = "SAMPLE"
    -180        LAST_DAY_SUPPORTS_DATE_PART = False
    -181
    -182        TYPE_MAPPING = {
    -183            **generator.Generator.TYPE_MAPPING,
    -184            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    -185        }
    -186
    -187        PROPERTIES_LOCATION = {
    -188            **generator.Generator.PROPERTIES_LOCATION,
    -189            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    -190            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    -191            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    -192        }
    -193
    -194        TRANSFORMS = {
    -195            **generator.Generator.TRANSFORMS,
    -196            exp.ArgMax: rename_func("MAX_BY"),
    -197            exp.ArgMin: rename_func("MIN_BY"),
    -198            exp.Max: max_or_greatest,
    -199            exp.Min: min_or_least,
    -200            exp.Pow: lambda self, e: self.binary(e, "**"),
    -201            exp.Select: transforms.preprocess(
    -202                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
    -203            ),
    -204            exp.StrToDate: lambda self,
    -205            e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    -206            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    -207            exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}",
    -208        }
    -209
    -210        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -211            if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"):
    -212                # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>)
    -213                expression.to.pop()
    -214
    -215            return super().cast_sql(expression, safe_prefix=safe_prefix)
    -216
    -217        def trycast_sql(self, expression: exp.TryCast) -> str:
    -218            return self.cast_sql(expression, safe_prefix="TRY")
    -219
    -220        def tablesample_sql(
    -221            self,
    -222            expression: exp.TableSample,
    -223            sep: str = " AS ",
    -224            tablesample_keyword: t.Optional[str] = None,
    -225        ) -> str:
    -226            return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}"
    -227
    -228        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -229            return f"PARTITION BY {self.sql(expression, 'this')}"
    -230
    -231        # FROM before SET in Teradata UPDATE syntax
    -232        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    -233        def update_sql(self, expression: exp.Update) -> str:
    -234            this = self.sql(expression, "this")
    -235            from_sql = self.sql(expression, "from")
    -236            set_sql = self.expressions(expression, flat=True)
    -237            where_sql = self.sql(expression, "where")
    -238            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -239            return self.prepend_ctes(expression, sql)
    -240
    -241        def mod_sql(self, expression: exp.Mod) -> str:
    -242            return self.binary(expression, "MOD")
    -243
    -244        def datatype_sql(self, expression: exp.DataType) -> str:
    -245            type_sql = super().datatype_sql(expression)
    -246            prefix_sql = expression.args.get("prefix")
    -247            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    -248
    -249        def rangen_sql(self, expression: exp.RangeN) -> str:
    -250            this = self.sql(expression, "this")
    -251            expressions_sql = self.expressions(expression)
    -252            each_sql = self.sql(expression, "each")
    -253            each_sql = f" EACH {each_sql}" if each_sql else ""
    -254
    -255            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    -256
    -257        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    -258            kind = self.sql(expression, "kind").upper()
    -259            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    -260                this_name = self.sql(expression.this, "this")
    -261                this_properties = self.properties(
    -262                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    -263                    wrapped=False,
    -264                    prefix=",",
    -265                )
    -266                this_schema = self.schema_columns_sql(expression.this)
    -267                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    -268
    -269            return super().createable_sql(expression, locations)
    +            
    175    class Generator(generator.Generator):
    +176        LIMIT_IS_TOP = True
    +177        JOIN_HINTS = False
    +178        TABLE_HINTS = False
    +179        QUERY_HINTS = False
    +180        TABLESAMPLE_KEYWORDS = "SAMPLE"
    +181        LAST_DAY_SUPPORTS_DATE_PART = False
    +182
    +183        TYPE_MAPPING = {
    +184            **generator.Generator.TYPE_MAPPING,
    +185            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
    +186        }
    +187
    +188        PROPERTIES_LOCATION = {
    +189            **generator.Generator.PROPERTIES_LOCATION,
    +190            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
    +191            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
    +192            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
    +193        }
    +194
    +195        TRANSFORMS = {
    +196            **generator.Generator.TRANSFORMS,
    +197            exp.ArgMax: rename_func("MAX_BY"),
    +198            exp.ArgMin: rename_func("MIN_BY"),
    +199            exp.Max: max_or_greatest,
    +200            exp.Min: min_or_least,
    +201            exp.Pow: lambda self, e: self.binary(e, "**"),
    +202            exp.Select: transforms.preprocess(
    +203                [transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
    +204            ),
    +205            exp.StrToDate: lambda self,
    +206            e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
    +207            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    +208            exp.Use: lambda self, e: f"DATABASE {self.sql(e, 'this')}",
    +209        }
    +210
    +211        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +212            if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"):
    +213                # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>)
    +214                expression.to.pop()
    +215
    +216            return super().cast_sql(expression, safe_prefix=safe_prefix)
    +217
    +218        def trycast_sql(self, expression: exp.TryCast) -> str:
    +219            return self.cast_sql(expression, safe_prefix="TRY")
    +220
    +221        def tablesample_sql(
    +222            self,
    +223            expression: exp.TableSample,
    +224            sep: str = " AS ",
    +225            tablesample_keyword: t.Optional[str] = None,
    +226        ) -> str:
    +227            return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}"
    +228
    +229        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +230            return f"PARTITION BY {self.sql(expression, 'this')}"
    +231
    +232        # FROM before SET in Teradata UPDATE syntax
    +233        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
    +234        def update_sql(self, expression: exp.Update) -> str:
    +235            this = self.sql(expression, "this")
    +236            from_sql = self.sql(expression, "from")
    +237            set_sql = self.expressions(expression, flat=True)
    +238            where_sql = self.sql(expression, "where")
    +239            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +240            return self.prepend_ctes(expression, sql)
    +241
    +242        def mod_sql(self, expression: exp.Mod) -> str:
    +243            return self.binary(expression, "MOD")
    +244
    +245        def datatype_sql(self, expression: exp.DataType) -> str:
    +246            type_sql = super().datatype_sql(expression)
    +247            prefix_sql = expression.args.get("prefix")
    +248            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    +249
    +250        def rangen_sql(self, expression: exp.RangeN) -> str:
    +251            this = self.sql(expression, "this")
    +252            expressions_sql = self.expressions(expression)
    +253            each_sql = self.sql(expression, "each")
    +254            each_sql = f" EACH {each_sql}" if each_sql else ""
    +255
    +256            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +257
    +258        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    +259            kind = self.sql(expression, "kind").upper()
    +260            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +261                this_name = self.sql(expression.this, "this")
    +262                this_properties = self.properties(
    +263                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +264                    wrapped=False,
    +265                    prefix=",",
    +266                )
    +267                this_schema = self.schema_columns_sql(expression.this)
    +268                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +269
    +270            return super().createable_sql(expression, locations)
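
As a rough sketch of how these TRANSFORMS surface when transpiling into Teradata: the inputs below are assumed MySQL/DuckDB statements chosen to produce exp.Use, exp.ArgMax and exp.StrToDate, and the exact rendered output may vary between sqlglot versions, so the results are printed rather than asserted.

    import sqlglot

    # exp.Use renders as a DATABASE statement in Teradata.
    print(sqlglot.transpile("USE my_db", read="mysql", write="teradata")[0])

    # exp.ArgMax is renamed to MAX_BY (parsed here from DuckDB's ARG_MAX).
    print(sqlglot.transpile("SELECT ARG_MAX(x, y) FROM t", read="duckdb", write="teradata")[0])

    # exp.StrToDate becomes CAST(... AS DATE FORMAT ...), with the format string remapped.
    print(sqlglot.transpile("SELECT STR_TO_DATE('2024-02-20', '%Y-%m-%d')", read="mysql", write="teradata")[0])
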
     
    @@ -1842,12 +1860,12 @@ Default: True
    -
    210        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    -211            if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"):
    -212                # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>)
    -213                expression.to.pop()
    -214
    -215            return super().cast_sql(expression, safe_prefix=safe_prefix)
    +            
    211        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    +212            if expression.to.this == exp.DataType.Type.UNKNOWN and expression.args.get("format"):
    +213                # We don't actually want to print the unknown type in CAST(<value> AS FORMAT <format>)
    +214                expression.to.pop()
    +215
    +216            return super().cast_sql(expression, safe_prefix=safe_prefix)
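
For illustration only (the statement is an assumed Teradata-style input, not part of this patch): cast_sql keeps a FORMAT clause inside CAST, and the UNKNOWN check above exists so that the typeless `CAST(<value> AS FORMAT <format>)` form does not print a placeholder type.

    import sqlglot

    # Round-trip a Teradata CAST that carries a FORMAT clause.
    sql = "SELECT CAST('1992-01-01' AS DATE FORMAT 'YYYY-MM-DD')"
    print(sqlglot.transpile(sql, read="teradata", write="teradata")[0])
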
     
    @@ -1865,8 +1883,8 @@ Default: True
    -
    217        def trycast_sql(self, expression: exp.TryCast) -> str:
    -218            return self.cast_sql(expression, safe_prefix="TRY")
    +            
    218        def trycast_sql(self, expression: exp.TryCast) -> str:
    +219            return self.cast_sql(expression, safe_prefix="TRY")
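
A small sketch with an illustrative input: TRYCAST is parsed through the TRY_CAST function parser and generated back with the TRY prefix, so it maps cleanly to dialects that spell it TRY_CAST.

    import sqlglot

    sql = "SELECT TRYCAST('5' AS INT)"
    print(sqlglot.transpile(sql, read="teradata", write="teradata")[0])   # stays TRYCAST
    print(sqlglot.transpile(sql, read="teradata", write="snowflake")[0])  # becomes TRY_CAST
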
     
    @@ -1884,13 +1902,13 @@ Default: True
    -
    220        def tablesample_sql(
    -221            self,
    -222            expression: exp.TableSample,
    -223            sep: str = " AS ",
    -224            tablesample_keyword: t.Optional[str] = None,
    -225        ) -> str:
    -226            return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}"
    +            
    221        def tablesample_sql(
    +222            self,
    +223            expression: exp.TableSample,
    +224            sep: str = " AS ",
    +225            tablesample_keyword: t.Optional[str] = None,
    +226        ) -> str:
    +227            return f"{self.sql(expression, 'this')} SAMPLE {self.expressions(expression)}"
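
A usage sketch, assuming the Teradata tokenizer treats SAMPLE as a table-sample keyword (the sample fractions are arbitrary): TABLESAMPLE_CSV on the parser plus this generator override keep the comma-separated SAMPLE clause intact on a round trip.

    import sqlglot

    # Teradata-style SAMPLE with a comma-separated list of fractions.
    print(sqlglot.transpile("SELECT * FROM tbl SAMPLE 0.33, 0.25", read="teradata", write="teradata")[0])
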
     
    @@ -1908,8 +1926,8 @@ Default: True
    -
    228        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    -229            return f"PARTITION BY {self.sql(expression, 'this')}"
    +            
    229        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
    +230            return f"PARTITION BY {self.sql(expression, 'this')}"
     
    @@ -1927,13 +1945,13 @@ Default: True
    -
    233        def update_sql(self, expression: exp.Update) -> str:
    -234            this = self.sql(expression, "this")
    -235            from_sql = self.sql(expression, "from")
    -236            set_sql = self.expressions(expression, flat=True)
    -237            where_sql = self.sql(expression, "where")
    -238            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    -239            return self.prepend_ctes(expression, sql)
    +            
    234        def update_sql(self, expression: exp.Update) -> str:
    +235            this = self.sql(expression, "this")
    +236            from_sql = self.sql(expression, "from")
    +237            set_sql = self.expressions(expression, flat=True)
    +238            where_sql = self.sql(expression, "where")
    +239            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
    +240            return self.prepend_ctes(expression, sql)
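
A sketch of the FROM-before-SET rewrite; the input UPDATE is an assumed Postgres-style statement. Reading a dialect that places FROM after SET and writing Teradata moves the FROM clause ahead of SET, as the f-string above spells out.

    import sqlglot

    sql = "UPDATE t SET x = s.x FROM s WHERE t.id = s.id"
    print(sqlglot.transpile(sql, read="postgres", write="teradata")[0])
    # Expected shape (not asserted): UPDATE t FROM s SET x = s.x WHERE t.id = s.id
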
     
    @@ -1951,8 +1969,8 @@ Default: True
    -
    241        def mod_sql(self, expression: exp.Mod) -> str:
    -242            return self.binary(expression, "MOD")
    +            
    242        def mod_sql(self, expression: exp.Mod) -> str:
    +243            return self.binary(expression, "MOD")
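
Minimal sketch: the modulo operator is rendered with the MOD keyword, so `%` from another dialect comes out as `a MOD b`.

    import sqlglot

    print(sqlglot.transpile("SELECT 5 % 2", read="postgres", write="teradata")[0])
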
     
    @@ -1970,10 +1988,10 @@ Default: True
    -
    244        def datatype_sql(self, expression: exp.DataType) -> str:
    -245            type_sql = super().datatype_sql(expression)
    -246            prefix_sql = expression.args.get("prefix")
    -247            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
    +            
    245        def datatype_sql(self, expression: exp.DataType) -> str:
    +246            type_sql = super().datatype_sql(expression)
    +247            prefix_sql = expression.args.get("prefix")
    +248            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
     
    @@ -1991,13 +2009,13 @@ Default: True
    -
    249        def rangen_sql(self, expression: exp.RangeN) -> str:
    -250            this = self.sql(expression, "this")
    -251            expressions_sql = self.expressions(expression)
    -252            each_sql = self.sql(expression, "each")
    -253            each_sql = f" EACH {each_sql}" if each_sql else ""
    -254
    -255            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
    +            
    250        def rangen_sql(self, expression: exp.RangeN) -> str:
    +251            this = self.sql(expression, "this")
    +252            expressions_sql = self.expressions(expression)
    +253            each_sql = self.sql(expression, "each")
    +254            each_sql = f" EACH {each_sql}" if each_sql else ""
    +255
    +256            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
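
A round-trip sketch with an assumed RANGE_N expression: _parse_rangen and rangen_sql together preserve the identifier, the BETWEEN list, and the optional EACH step.

    import sqlglot

    sql = "SELECT RANGE_N(a BETWEEN 0 AND 100 EACH 10) FROM t"
    print(sqlglot.transpile(sql, read="teradata", write="teradata")[0])
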
     
    @@ -2015,19 +2033,19 @@ Default: True
    -
    257        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    -258            kind = self.sql(expression, "kind").upper()
    -259            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    -260                this_name = self.sql(expression.this, "this")
    -261                this_properties = self.properties(
    -262                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    -263                    wrapped=False,
    -264                    prefix=",",
    -265                )
    -266                this_schema = self.schema_columns_sql(expression.this)
    -267                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    -268
    -269            return super().createable_sql(expression, locations)
    +            
    258        def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
    +259            kind = self.sql(expression, "kind").upper()
    +260            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
    +261                this_name = self.sql(expression.this, "this")
    +262                this_properties = self.properties(
    +263                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
    +264                    wrapped=False,
    +265                    prefix=",",
    +266                )
    +267                this_schema = self.schema_columns_sql(expression.this)
    +268                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
    +269
    +270            return super().createable_sql(expression, locations)
     
    diff --git a/docs/sqlglot/dialects/trino.html b/docs/sqlglot/dialects/trino.html index c047d2d..1f8b4e5 100644 --- a/docs/sqlglot/dialects/trino.html +++ b/docs/sqlglot/dialects/trino.html @@ -605,7 +605,7 @@ Default: True diff --git a/docs/sqlglot/dialects/tsql.html b/docs/sqlglot/dialects/tsql.html index 13773c4..db63e25 100644 --- a/docs/sqlglot/dialects/tsql.html +++ b/docs/sqlglot/dialects/tsql.html @@ -1426,7 +1426,7 @@ @@ -1438,7 +1438,7 @@
    DATEPART_ONLY_FORMATS = -{'HOUR', 'DW', 'QUARTER'} +{'DW', 'QUARTER', 'HOUR'}
    @@ -2710,7 +2710,7 @@ True means a / b is integer division if both a and
    VAR_SINGLE_TOKENS = -{'@', '#', '$'} +{'#', '@', '$'}
    @@ -3068,7 +3068,7 @@ Default: 3
    JOIN_HINTS = -{'MERGE', 'REMOTE', 'HASH', 'LOOP'} +{'LOOP', 'MERGE', 'REMOTE', 'HASH'}
    @@ -3080,7 +3080,7 @@ Default: 3
    VAR_LENGTH_DATATYPES = -{<Type.VARCHAR: 'VARCHAR'>, <Type.NVARCHAR: 'NVARCHAR'>, <Type.NCHAR: 'NCHAR'>, <Type.CHAR: 'CHAR'>} +{<Type.VARCHAR: 'VARCHAR'>, <Type.NVARCHAR: 'NVARCHAR'>, <Type.CHAR: 'CHAR'>, <Type.NCHAR: 'NCHAR'>}
    @@ -3093,7 +3093,7 @@ Default: 3
    RETURNS_TABLE_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.VAR: 'VAR'>, <TokenType.ANY: 'ANY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ROW: 'ROW'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ASC: 'ASC'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.FULL: 'FULL'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DIV: 'DIV'>, <TokenType.CASE: 'CASE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.MODEL: 'MODEL'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LEFT: 'LEFT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DESC: 'DESC'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.APPLY: 'APPLY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>} + {<TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.IS: 'IS'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VAR: 'VAR'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 
'COMMAND'>, <TokenType.FILTER: 'FILTER'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.SET: 'SET'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.ROW: 'ROW'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.CASE: 'CASE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FULL: 'FULL'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.APPLY: 'APPLY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.KEEP: 'KEEP'>, <TokenType.END: 'END'>, <TokenType.KILL: 'KILL'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SOME: 'SOME'>, <TokenType.FALSE: 'FALSE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -3167,7 +3167,7 @@ Default: 3
    TABLE_ALIAS_TOKENS = - {<TokenType.UPDATE: 'UPDATE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TOP: 'TOP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.VAR: 'VAR'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.END: 'END'>, <TokenType.ALL: 'ALL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ANTI: 'ANTI'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ROW: 'ROW'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.SEMI: 'SEMI'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SET: 'SET'>, <TokenType.IS: 'IS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ASC: 'ASC'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CASE: 'CASE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.YEAR: 'YEAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MODEL: 'MODEL'>, <TokenType.UINT128: 
'UINT128'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LOAD: 'LOAD'>, <TokenType.CACHE: 'CACHE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT: 'INT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOME: 'SOME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.KILL: 'KILL'>, <TokenType.DATE: 'DATE'>, <TokenType.DESC: 'DESC'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UUID: 'UUID'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INET: 'INET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.INT128: 'INT128'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.XML: 'XML'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.STRUCT: 'STRUCT'>} + {<TokenType.BIT: 'BIT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IS: 'IS'>, <TokenType.XML: 'XML'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VAR: 'VAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ANY: 'ANY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FINAL: 'FINAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DICTIONARY: 
'DICTIONARY'>, <TokenType.USE: 'USE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ASC: 'ASC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE: 'DATE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TOP: 'TOP'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.NESTED: 'NESTED'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.NULL: 'NULL'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ALL: 'ALL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ROW: 'ROW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CASE: 'CASE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.UINT256: 'UINT256'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATE32: 'DATE32'>, 
<TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.VIEW: 'VIEW'>, <TokenType.JSON: 'JSON'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.END: 'END'>, <TokenType.INT128: 'INT128'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UPDATE: 'UPDATE'>}
    @@ -3281,6 +3281,7 @@ Default: 3
    TRIM_PATTERN_FIRST
    MODIFIERS_ATTACHED_TO_UNION
    UNION_MODIFIERS
    +
    VALUES_FOLLOWED_BY_PAREN
    error_level
    error_message_context
    max_errors
    @@ -3831,7 +3832,7 @@ Default: True @@ -3844,7 +3845,7 @@ Default: True diff --git a/docs/sqlglot/expressions.html b/docs/sqlglot/expressions.html index d240af2..e9f8952 100644 --- a/docs/sqlglot/expressions.html +++ b/docs/sqlglot/expressions.html @@ -14009,268 +14009,269 @@ SQL expressions, such as select.

    6894 table = to_table( 6895 new_name, 6896 **{k: v for k, v in node.args.items() if k not in TABLE_PARTS}, -6897 ) -6898 table.add_comments([original]) -6899 return table -6900 return node -6901 -6902 return expression.transform(_replace_tables, copy=copy) -6903 +6897 dialect=dialect, +6898 ) +6899 table.add_comments([original]) +6900 return table +6901 return node +6902 +6903 return expression.transform(_replace_tables, copy=copy) 6904 -6905def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression: -6906 """Replace placeholders in an expression. -6907 -6908 Args: -6909 expression: expression node to be transformed and replaced. -6910 args: positional names that will substitute unnamed placeholders in the given order. -6911 kwargs: keyword arguments that will substitute named placeholders. -6912 -6913 Examples: -6914 >>> from sqlglot import exp, parse_one -6915 >>> replace_placeholders( -6916 ... parse_one("select * from :tbl where ? = ?"), -6917 ... exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo") -6918 ... ).sql() -6919 "SELECT * FROM foo WHERE str_col = 'b'" -6920 -6921 Returns: -6922 The mapped expression. -6923 """ -6924 -6925 def _replace_placeholders(node: Expression, args, **kwargs) -> Expression: -6926 if isinstance(node, Placeholder): -6927 if node.name: -6928 new_name = kwargs.get(node.name) -6929 if new_name: -6930 return convert(new_name) -6931 else: -6932 try: -6933 return convert(next(args)) -6934 except StopIteration: -6935 pass -6936 return node -6937 -6938 return expression.transform(_replace_placeholders, iter(args), **kwargs) -6939 +6905 +6906def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression: +6907 """Replace placeholders in an expression. +6908 +6909 Args: +6910 expression: expression node to be transformed and replaced. +6911 args: positional names that will substitute unnamed placeholders in the given order. +6912 kwargs: keyword arguments that will substitute named placeholders. +6913 +6914 Examples: +6915 >>> from sqlglot import exp, parse_one +6916 >>> replace_placeholders( +6917 ... parse_one("select * from :tbl where ? = ?"), +6918 ... exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo") +6919 ... ).sql() +6920 "SELECT * FROM foo WHERE str_col = 'b'" +6921 +6922 Returns: +6923 The mapped expression. +6924 """ +6925 +6926 def _replace_placeholders(node: Expression, args, **kwargs) -> Expression: +6927 if isinstance(node, Placeholder): +6928 if node.name: +6929 new_name = kwargs.get(node.name) +6930 if new_name: +6931 return convert(new_name) +6932 else: +6933 try: +6934 return convert(next(args)) +6935 except StopIteration: +6936 pass +6937 return node +6938 +6939 return expression.transform(_replace_placeholders, iter(args), **kwargs) 6940 -6941def expand( -6942 expression: Expression, -6943 sources: t.Dict[str, Subqueryable], -6944 dialect: DialectType = None, -6945 copy: bool = True, -6946) -> Expression: -6947 """Transforms an expression by expanding all referenced sources into subqueries. 
-6948 -6949 Examples: -6950 >>> from sqlglot import parse_one -6951 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() -6952 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' -6953 -6954 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() -6955 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' -6956 -6957 Args: -6958 expression: The expression to expand. -6959 sources: A dictionary of name to Subqueryables. -6960 dialect: The dialect of the sources dict. -6961 copy: Whether or not to copy the expression during transformation. Defaults to True. -6962 -6963 Returns: -6964 The transformed expression. -6965 """ -6966 sources = {normalize_table_name(k, dialect=dialect): v for k, v in sources.items()} -6967 -6968 def _expand(node: Expression): -6969 if isinstance(node, Table): -6970 name = normalize_table_name(node, dialect=dialect) -6971 source = sources.get(name) -6972 if source: -6973 subquery = source.subquery(node.alias or name) -6974 subquery.comments = [f"source: {name}"] -6975 return subquery.transform(_expand, copy=False) -6976 return node -6977 -6978 return expression.transform(_expand, copy=copy) -6979 +6941 +6942def expand( +6943 expression: Expression, +6944 sources: t.Dict[str, Subqueryable], +6945 dialect: DialectType = None, +6946 copy: bool = True, +6947) -> Expression: +6948 """Transforms an expression by expanding all referenced sources into subqueries. +6949 +6950 Examples: +6951 >>> from sqlglot import parse_one +6952 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql() +6953 'SELECT * FROM (SELECT * FROM y) AS z /* source: x */' +6954 +6955 >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql() +6956 'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */' +6957 +6958 Args: +6959 expression: The expression to expand. +6960 sources: A dictionary of name to Subqueryables. +6961 dialect: The dialect of the sources dict. +6962 copy: Whether or not to copy the expression during transformation. Defaults to True. +6963 +6964 Returns: +6965 The transformed expression. +6966 """ +6967 sources = {normalize_table_name(k, dialect=dialect): v for k, v in sources.items()} +6968 +6969 def _expand(node: Expression): +6970 if isinstance(node, Table): +6971 name = normalize_table_name(node, dialect=dialect) +6972 source = sources.get(name) +6973 if source: +6974 subquery = source.subquery(node.alias or name) +6975 subquery.comments = [f"source: {name}"] +6976 return subquery.transform(_expand, copy=False) +6977 return node +6978 +6979 return expression.transform(_expand, copy=copy) 6980 -6981def func(name: str, *args, copy: bool = True, dialect: DialectType = None, **kwargs) -> Func: -6982 """ -6983 Returns a Func expression. -6984 -6985 Examples: -6986 >>> func("abs", 5).sql() -6987 'ABS(5)' -6988 -6989 >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql() -6990 'CAST(5 AS DOUBLE)' -6991 -6992 Args: -6993 name: the name of the function to build. -6994 args: the args used to instantiate the function of interest. -6995 copy: whether or not to copy the argument expressions. -6996 dialect: the source dialect. -6997 kwargs: the kwargs used to instantiate the function of interest. -6998 -6999 Note: -7000 The arguments `args` and `kwargs` are mutually exclusive. 
-7001 -7002 Returns: -7003 An instance of the function of interest, or an anonymous function, if `name` doesn't -7004 correspond to an existing `sqlglot.expressions.Func` class. -7005 """ -7006 if args and kwargs: -7007 raise ValueError("Can't use both args and kwargs to instantiate a function.") -7008 -7009 from sqlglot.dialects.dialect import Dialect -7010 -7011 dialect = Dialect.get_or_raise(dialect) -7012 -7013 converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect, copy=copy) for arg in args] -7014 kwargs = {key: maybe_parse(value, dialect=dialect, copy=copy) for key, value in kwargs.items()} -7015 -7016 constructor = dialect.parser_class.FUNCTIONS.get(name.upper()) -7017 if constructor: -7018 if converted: -7019 if "dialect" in constructor.__code__.co_varnames: -7020 function = constructor(converted, dialect=dialect) -7021 else: -7022 function = constructor(converted) -7023 elif constructor.__name__ == "from_arg_list": -7024 function = constructor.__self__(**kwargs) # type: ignore -7025 else: -7026 constructor = FUNCTION_BY_NAME.get(name.upper()) -7027 if constructor: -7028 function = constructor(**kwargs) -7029 else: -7030 raise ValueError( -7031 f"Unable to convert '{name}' into a Func. Either manually construct " -7032 "the Func expression of interest or parse the function call." -7033 ) -7034 else: -7035 kwargs = kwargs or {"expressions": converted} -7036 function = Anonymous(this=name, **kwargs) -7037 -7038 for error_message in function.error_messages(converted): -7039 raise ValueError(error_message) -7040 -7041 return function -7042 +6981 +6982def func(name: str, *args, copy: bool = True, dialect: DialectType = None, **kwargs) -> Func: +6983 """ +6984 Returns a Func expression. +6985 +6986 Examples: +6987 >>> func("abs", 5).sql() +6988 'ABS(5)' +6989 +6990 >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql() +6991 'CAST(5 AS DOUBLE)' +6992 +6993 Args: +6994 name: the name of the function to build. +6995 args: the args used to instantiate the function of interest. +6996 copy: whether or not to copy the argument expressions. +6997 dialect: the source dialect. +6998 kwargs: the kwargs used to instantiate the function of interest. +6999 +7000 Note: +7001 The arguments `args` and `kwargs` are mutually exclusive. +7002 +7003 Returns: +7004 An instance of the function of interest, or an anonymous function, if `name` doesn't +7005 correspond to an existing `sqlglot.expressions.Func` class. +7006 """ +7007 if args and kwargs: +7008 raise ValueError("Can't use both args and kwargs to instantiate a function.") +7009 +7010 from sqlglot.dialects.dialect import Dialect +7011 +7012 dialect = Dialect.get_or_raise(dialect) +7013 +7014 converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect, copy=copy) for arg in args] +7015 kwargs = {key: maybe_parse(value, dialect=dialect, copy=copy) for key, value in kwargs.items()} +7016 +7017 constructor = dialect.parser_class.FUNCTIONS.get(name.upper()) +7018 if constructor: +7019 if converted: +7020 if "dialect" in constructor.__code__.co_varnames: +7021 function = constructor(converted, dialect=dialect) +7022 else: +7023 function = constructor(converted) +7024 elif constructor.__name__ == "from_arg_list": +7025 function = constructor.__self__(**kwargs) # type: ignore +7026 else: +7027 constructor = FUNCTION_BY_NAME.get(name.upper()) +7028 if constructor: +7029 function = constructor(**kwargs) +7030 else: +7031 raise ValueError( +7032 f"Unable to convert '{name}' into a Func. 
Either manually construct " +7033 "the Func expression of interest or parse the function call." +7034 ) +7035 else: +7036 kwargs = kwargs or {"expressions": converted} +7037 function = Anonymous(this=name, **kwargs) +7038 +7039 for error_message in function.error_messages(converted): +7040 raise ValueError(error_message) +7041 +7042 return function 7043 -7044def case( -7045 expression: t.Optional[ExpOrStr] = None, -7046 **opts, -7047) -> Case: -7048 """ -7049 Initialize a CASE statement. -7050 -7051 Example: -7052 case().when("a = 1", "foo").else_("bar") -7053 -7054 Args: -7055 expression: Optionally, the input expression (not all dialects support this) -7056 **opts: Extra keyword arguments for parsing `expression` -7057 """ -7058 if expression is not None: -7059 this = maybe_parse(expression, **opts) -7060 else: -7061 this = None -7062 return Case(this=this, ifs=[]) -7063 +7044 +7045def case( +7046 expression: t.Optional[ExpOrStr] = None, +7047 **opts, +7048) -> Case: +7049 """ +7050 Initialize a CASE statement. +7051 +7052 Example: +7053 case().when("a = 1", "foo").else_("bar") +7054 +7055 Args: +7056 expression: Optionally, the input expression (not all dialects support this) +7057 **opts: Extra keyword arguments for parsing `expression` +7058 """ +7059 if expression is not None: +7060 this = maybe_parse(expression, **opts) +7061 else: +7062 this = None +7063 return Case(this=this, ifs=[]) 7064 -7065def cast_unless( -7066 expression: ExpOrStr, -7067 to: DATA_TYPE, -7068 *types: DATA_TYPE, -7069 **opts: t.Any, -7070) -> Expression | Cast: -7071 """ -7072 Cast an expression to a data type unless it is a specified type. -7073 -7074 Args: -7075 expression: The expression to cast. -7076 to: The data type to cast to. -7077 **types: The types to exclude from casting. -7078 **opts: Extra keyword arguments for parsing `expression` -7079 """ -7080 expr = maybe_parse(expression, **opts) -7081 if expr.is_type(*types): -7082 return expr -7083 return cast(expr, to, **opts) -7084 +7065 +7066def cast_unless( +7067 expression: ExpOrStr, +7068 to: DATA_TYPE, +7069 *types: DATA_TYPE, +7070 **opts: t.Any, +7071) -> Expression | Cast: +7072 """ +7073 Cast an expression to a data type unless it is a specified type. +7074 +7075 Args: +7076 expression: The expression to cast. +7077 to: The data type to cast to. +7078 **types: The types to exclude from casting. +7079 **opts: Extra keyword arguments for parsing `expression` +7080 """ +7081 expr = maybe_parse(expression, **opts) +7082 if expr.is_type(*types): +7083 return expr +7084 return cast(expr, to, **opts) 7085 -7086def array( -7087 *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs -7088) -> Array: -7089 """ -7090 Returns an array. -7091 -7092 Examples: -7093 >>> array(1, 'x').sql() -7094 'ARRAY(1, x)' -7095 -7096 Args: -7097 expressions: the expressions to add to the array. -7098 copy: whether or not to copy the argument expressions. -7099 dialect: the source dialect. -7100 kwargs: the kwargs used to instantiate the function of interest. -7101 -7102 Returns: -7103 An array expression. -7104 """ -7105 return Array( -7106 expressions=[ -7107 maybe_parse(expression, copy=copy, dialect=dialect, **kwargs) -7108 for expression in expressions -7109 ] -7110 ) -7111 +7086 +7087def array( +7088 *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs +7089) -> Array: +7090 """ +7091 Returns an array. 
+7092 +7093 Examples: +7094 >>> array(1, 'x').sql() +7095 'ARRAY(1, x)' +7096 +7097 Args: +7098 expressions: the expressions to add to the array. +7099 copy: whether or not to copy the argument expressions. +7100 dialect: the source dialect. +7101 kwargs: the kwargs used to instantiate the function of interest. +7102 +7103 Returns: +7104 An array expression. +7105 """ +7106 return Array( +7107 expressions=[ +7108 maybe_parse(expression, copy=copy, dialect=dialect, **kwargs) +7109 for expression in expressions +7110 ] +7111 ) 7112 -7113def tuple_( -7114 *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs -7115) -> Tuple: -7116 """ -7117 Returns an tuple. -7118 -7119 Examples: -7120 >>> tuple_(1, 'x').sql() -7121 '(1, x)' -7122 -7123 Args: -7124 expressions: the expressions to add to the tuple. -7125 copy: whether or not to copy the argument expressions. -7126 dialect: the source dialect. -7127 kwargs: the kwargs used to instantiate the function of interest. -7128 -7129 Returns: -7130 A tuple expression. -7131 """ -7132 return Tuple( -7133 expressions=[ -7134 maybe_parse(expression, copy=copy, dialect=dialect, **kwargs) -7135 for expression in expressions -7136 ] -7137 ) -7138 +7113 +7114def tuple_( +7115 *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs +7116) -> Tuple: +7117 """ +7118 Returns an tuple. +7119 +7120 Examples: +7121 >>> tuple_(1, 'x').sql() +7122 '(1, x)' +7123 +7124 Args: +7125 expressions: the expressions to add to the tuple. +7126 copy: whether or not to copy the argument expressions. +7127 dialect: the source dialect. +7128 kwargs: the kwargs used to instantiate the function of interest. +7129 +7130 Returns: +7131 A tuple expression. +7132 """ +7133 return Tuple( +7134 expressions=[ +7135 maybe_parse(expression, copy=copy, dialect=dialect, **kwargs) +7136 for expression in expressions +7137 ] +7138 ) 7139 -7140def true() -> Boolean: -7141 """ -7142 Returns a true Boolean expression. -7143 """ -7144 return Boolean(this=True) -7145 +7140 +7141def true() -> Boolean: +7142 """ +7143 Returns a true Boolean expression. +7144 """ +7145 return Boolean(this=True) 7146 -7147def false() -> Boolean: -7148 """ -7149 Returns a false Boolean expression. -7150 """ -7151 return Boolean(this=False) -7152 +7147 +7148def false() -> Boolean: +7149 """ +7150 Returns a false Boolean expression. +7151 """ +7152 return Boolean(this=False) 7153 -7154def null() -> Null: -7155 """ -7156 Returns a Null expression. -7157 """ -7158 return Null() +7154 +7155def null() -> Null: +7156 """ +7157 Returns a Null expression. +7158 """ +7159 return Null()
    @@ -48027,7 +48028,7 @@ If an Expression instance is passed, it w
    TEXT_TYPES = - {<Type.VARCHAR: 'VARCHAR'>, <Type.NVARCHAR: 'NVARCHAR'>, <Type.TEXT: 'TEXT'>, <Type.CHAR: 'CHAR'>, <Type.NCHAR: 'NCHAR'>} + {<Type.NVARCHAR: 'NVARCHAR'>, <Type.VARCHAR: 'VARCHAR'>, <Type.CHAR: 'CHAR'>, <Type.TEXT: 'TEXT'>, <Type.NCHAR: 'NCHAR'>}
    @@ -48040,7 +48041,7 @@ If an Expression instance is passed, it w
    INTEGER_TYPES = - {<Type.INT128: 'INT128'>, <Type.BIGINT: 'BIGINT'>, <Type.INT256: 'INT256'>, <Type.TINYINT: 'TINYINT'>, <Type.BIT: 'BIT'>, <Type.INT: 'INT'>, <Type.SMALLINT: 'SMALLINT'>} + {<Type.BIT: 'BIT'>, <Type.TINYINT: 'TINYINT'>, <Type.INT: 'INT'>, <Type.SMALLINT: 'SMALLINT'>, <Type.BIGINT: 'BIGINT'>, <Type.INT256: 'INT256'>, <Type.INT128: 'INT128'>}
    @@ -48052,7 +48053,7 @@ If an Expression instance is passed, it w
    FLOAT_TYPES = -{<Type.FLOAT: 'FLOAT'>, <Type.DOUBLE: 'DOUBLE'>} +{<Type.DOUBLE: 'DOUBLE'>, <Type.FLOAT: 'FLOAT'>}
    @@ -48065,7 +48066,7 @@ If an Expression instance is passed, it w
    NUMERIC_TYPES = - {<Type.INT128: 'INT128'>, <Type.BIGINT: 'BIGINT'>, <Type.DOUBLE: 'DOUBLE'>, <Type.INT256: 'INT256'>, <Type.TINYINT: 'TINYINT'>, <Type.BIT: 'BIT'>, <Type.FLOAT: 'FLOAT'>, <Type.INT: 'INT'>, <Type.SMALLINT: 'SMALLINT'>} + {<Type.DOUBLE: 'DOUBLE'>, <Type.BIT: 'BIT'>, <Type.TINYINT: 'TINYINT'>, <Type.FLOAT: 'FLOAT'>, <Type.INT: 'INT'>, <Type.SMALLINT: 'SMALLINT'>, <Type.BIGINT: 'BIGINT'>, <Type.INT256: 'INT256'>, <Type.INT128: 'INT128'>}
    @@ -48078,7 +48079,7 @@ If an Expression instance is passed, it w
    TEMPORAL_TYPES = - {<Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.DATETIME: 'DATETIME'>, <Type.DATE32: 'DATE32'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <Type.TIME: 'TIME'>, <Type.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <Type.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <Type.TIMESTAMP: 'TIMESTAMP'>, <Type.TIMETZ: 'TIMETZ'>, <Type.DATETIME64: 'DATETIME64'>, <Type.TIMESTAMP_S: 'TIMESTAMP_S'>, <Type.DATE: 'DATE'>} + {<Type.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <Type.DATE: 'DATE'>, <Type.DATE32: 'DATE32'>, <Type.TIME: 'TIME'>, <Type.DATETIME: 'DATETIME'>, <Type.TIMESTAMP: 'TIMESTAMP'>, <Type.TIMESTAMP_S: 'TIMESTAMP_S'>, <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <Type.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <Type.DATETIME64: 'DATETIME64'>, <Type.TIMETZ: 'TIMETZ'>}
    @@ -90681,12 +90682,13 @@ True: Always quote. 6895 table = to_table( 6896 new_name, 6897 **{k: v for k, v in node.args.items() if k not in TABLE_PARTS}, -6898 ) -6899 table.add_comments([original]) -6900 return table -6901 return node -6902 -6903 return expression.transform(_replace_tables, copy=copy) +6898 dialect=dialect, +6899 ) +6900 table.add_comments([original]) +6901 return table +6902 return node +6903 +6904 return expression.transform(_replace_tables, copy=copy)
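For context, the hunk above threads `dialect` through to the `to_table` call inside `replace_tables`, so replacement names in the mapping are parsed with the caller's dialect rather than the default one. A minimal sketch of calling the public helper under that assumption (table names are made up, output comment is indicative):

    from sqlglot import parse_one
    from sqlglot.expressions import replace_tables

    # Passing dialect means both the mapping keys and (after this change) the
    # replacement names are parsed with Spark rules, e.g. backtick quoting.
    query = parse_one("SELECT * FROM c.db.tbl", dialect="spark")
    renamed = replace_tables(query, {"c.db.tbl": "c.db.`new_tbl`"}, dialect="spark")
    print(renamed.sql(dialect="spark"))  # e.g. SELECT * FROM c.db.new_tbl /* c.db.tbl */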
    @@ -90732,40 +90734,40 @@ True: Always quote.
    -
    6906def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression:
    -6907    """Replace placeholders in an expression.
    -6908
    -6909    Args:
    -6910        expression: expression node to be transformed and replaced.
    -6911        args: positional names that will substitute unnamed placeholders in the given order.
    -6912        kwargs: keyword arguments that will substitute named placeholders.
    -6913
    -6914    Examples:
    -6915        >>> from sqlglot import exp, parse_one
    -6916        >>> replace_placeholders(
    -6917        ...     parse_one("select * from :tbl where ? = ?"),
    -6918        ...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")
    -6919        ... ).sql()
    -6920        "SELECT * FROM foo WHERE str_col = 'b'"
    -6921
    -6922    Returns:
    -6923        The mapped expression.
    -6924    """
    -6925
    -6926    def _replace_placeholders(node: Expression, args, **kwargs) -> Expression:
    -6927        if isinstance(node, Placeholder):
    -6928            if node.name:
    -6929                new_name = kwargs.get(node.name)
    -6930                if new_name:
    -6931                    return convert(new_name)
    -6932            else:
    -6933                try:
    -6934                    return convert(next(args))
    -6935                except StopIteration:
    -6936                    pass
    -6937        return node
    -6938
    -6939    return expression.transform(_replace_placeholders, iter(args), **kwargs)
    +            
    6907def replace_placeholders(expression: Expression, *args, **kwargs) -> Expression:
    +6908    """Replace placeholders in an expression.
    +6909
    +6910    Args:
    +6911        expression: expression node to be transformed and replaced.
    +6912        args: positional names that will substitute unnamed placeholders in the given order.
    +6913        kwargs: keyword arguments that will substitute named placeholders.
    +6914
    +6915    Examples:
    +6916        >>> from sqlglot import exp, parse_one
    +6917        >>> replace_placeholders(
    +6918        ...     parse_one("select * from :tbl where ? = ?"),
    +6919        ...     exp.to_identifier("str_col"), "b", tbl=exp.to_identifier("foo")
    +6920        ... ).sql()
    +6921        "SELECT * FROM foo WHERE str_col = 'b'"
    +6922
    +6923    Returns:
    +6924        The mapped expression.
    +6925    """
    +6926
    +6927    def _replace_placeholders(node: Expression, args, **kwargs) -> Expression:
    +6928        if isinstance(node, Placeholder):
    +6929            if node.name:
    +6930                new_name = kwargs.get(node.name)
    +6931                if new_name:
    +6932                    return convert(new_name)
    +6933            else:
    +6934                try:
    +6935                    return convert(next(args))
    +6936                except StopIteration:
    +6937                    pass
    +6938        return node
    +6939
    +6940    return expression.transform(_replace_placeholders, iter(args), **kwargs)
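A small usage sketch to complement the docstring above; note, per the StopIteration handling in the code, that placeholders with no matching argument are simply left in place (output comment is indicative):

    from sqlglot import parse_one
    from sqlglot.expressions import replace_placeholders

    tree = parse_one("SELECT ? AS a, ? AS b FROM :tbl")
    # Only one positional value and no value for :tbl -> the extra placeholders survive.
    print(replace_placeholders(tree, 1).sql())  # SELECT 1 AS a, ? AS b FROM :tbl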
     
    @@ -90813,44 +90815,44 @@ True: Always quote.
    -
    6942def expand(
    -6943    expression: Expression,
    -6944    sources: t.Dict[str, Subqueryable],
    -6945    dialect: DialectType = None,
    -6946    copy: bool = True,
    -6947) -> Expression:
    -6948    """Transforms an expression by expanding all referenced sources into subqueries.
    -6949
    -6950    Examples:
    -6951        >>> from sqlglot import parse_one
    -6952        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
    -6953        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
    -6954
    -6955        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()
    -6956        'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'
    -6957
    -6958    Args:
    -6959        expression: The expression to expand.
    -6960        sources: A dictionary of name to Subqueryables.
    -6961        dialect: The dialect of the sources dict.
    -6962        copy: Whether or not to copy the expression during transformation. Defaults to True.
    -6963
    -6964    Returns:
    -6965        The transformed expression.
    -6966    """
    -6967    sources = {normalize_table_name(k, dialect=dialect): v for k, v in sources.items()}
    -6968
    -6969    def _expand(node: Expression):
    -6970        if isinstance(node, Table):
    -6971            name = normalize_table_name(node, dialect=dialect)
    -6972            source = sources.get(name)
    -6973            if source:
    -6974                subquery = source.subquery(node.alias or name)
    -6975                subquery.comments = [f"source: {name}"]
    -6976                return subquery.transform(_expand, copy=False)
    -6977        return node
    -6978
    -6979    return expression.transform(_expand, copy=copy)
    +            
    6943def expand(
    +6944    expression: Expression,
    +6945    sources: t.Dict[str, Subqueryable],
    +6946    dialect: DialectType = None,
    +6947    copy: bool = True,
    +6948) -> Expression:
    +6949    """Transforms an expression by expanding all referenced sources into subqueries.
    +6950
    +6951    Examples:
    +6952        >>> from sqlglot import parse_one
    +6953        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y")}).sql()
    +6954        'SELECT * FROM (SELECT * FROM y) AS z /* source: x */'
    +6955
    +6956        >>> expand(parse_one("select * from x AS z"), {"x": parse_one("select * from y"), "y": parse_one("select * from z")}).sql()
    +6957        'SELECT * FROM (SELECT * FROM (SELECT * FROM z) AS y /* source: y */) AS z /* source: x */'
    +6958
    +6959    Args:
    +6960        expression: The expression to expand.
    +6961        sources: A dictionary of name to Subqueryables.
    +6962        dialect: The dialect of the sources dict.
    +6963        copy: Whether or not to copy the expression during transformation. Defaults to True.
    +6964
    +6965    Returns:
    +6966        The transformed expression.
    +6967    """
    +6968    sources = {normalize_table_name(k, dialect=dialect): v for k, v in sources.items()}
    +6969
    +6970    def _expand(node: Expression):
    +6971        if isinstance(node, Table):
    +6972            name = normalize_table_name(node, dialect=dialect)
    +6973            source = sources.get(name)
    +6974            if source:
    +6975                subquery = source.subquery(node.alias or name)
    +6976                subquery.comments = [f"source: {name}"]
    +6977                return subquery.transform(_expand, copy=False)
    +6978        return node
    +6979
    +6980    return expression.transform(_expand, copy=copy)
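Another illustration of the behaviour documented above, with one matched and one unmatched table (identifiers are made up; the output comment mirrors the docstring pattern):

    from sqlglot import parse_one
    from sqlglot.expressions import expand

    query = parse_one("SELECT * FROM x JOIN z ON x.id = z.id")
    # Only `x` has a source, so only `x` becomes a subquery; `z` is left untouched.
    expanded = expand(query, {"x": parse_one("SELECT id FROM raw_x")})
    print(expanded.sql())
    # SELECT * FROM (SELECT id FROM raw_x) AS x /* source: x */ JOIN z ON x.id = z.id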
     
    @@ -90902,67 +90904,67 @@ True: Always quote.
    -
    6982def func(name: str, *args, copy: bool = True, dialect: DialectType = None, **kwargs) -> Func:
    -6983    """
    -6984    Returns a Func expression.
    -6985
    -6986    Examples:
    -6987        >>> func("abs", 5).sql()
    -6988        'ABS(5)'
    -6989
    -6990        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
    -6991        'CAST(5 AS DOUBLE)'
    -6992
    -6993    Args:
    -6994        name: the name of the function to build.
    -6995        args: the args used to instantiate the function of interest.
    -6996        copy: whether or not to copy the argument expressions.
    -6997        dialect: the source dialect.
    -6998        kwargs: the kwargs used to instantiate the function of interest.
    -6999
    -7000    Note:
    -7001        The arguments `args` and `kwargs` are mutually exclusive.
    -7002
    -7003    Returns:
    -7004        An instance of the function of interest, or an anonymous function, if `name` doesn't
    -7005        correspond to an existing `sqlglot.expressions.Func` class.
    -7006    """
    -7007    if args and kwargs:
    -7008        raise ValueError("Can't use both args and kwargs to instantiate a function.")
    -7009
    -7010    from sqlglot.dialects.dialect import Dialect
    -7011
    -7012    dialect = Dialect.get_or_raise(dialect)
    -7013
    -7014    converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect, copy=copy) for arg in args]
    -7015    kwargs = {key: maybe_parse(value, dialect=dialect, copy=copy) for key, value in kwargs.items()}
    -7016
    -7017    constructor = dialect.parser_class.FUNCTIONS.get(name.upper())
    -7018    if constructor:
    -7019        if converted:
    -7020            if "dialect" in constructor.__code__.co_varnames:
    -7021                function = constructor(converted, dialect=dialect)
    -7022            else:
    -7023                function = constructor(converted)
    -7024        elif constructor.__name__ == "from_arg_list":
    -7025            function = constructor.__self__(**kwargs)  # type: ignore
    -7026        else:
    -7027            constructor = FUNCTION_BY_NAME.get(name.upper())
    -7028            if constructor:
    -7029                function = constructor(**kwargs)
    -7030            else:
    -7031                raise ValueError(
    -7032                    f"Unable to convert '{name}' into a Func. Either manually construct "
    -7033                    "the Func expression of interest or parse the function call."
    -7034                )
    -7035    else:
    -7036        kwargs = kwargs or {"expressions": converted}
    -7037        function = Anonymous(this=name, **kwargs)
    -7038
    -7039    for error_message in function.error_messages(converted):
    -7040        raise ValueError(error_message)
    -7041
    -7042    return function
    +            
    6983def func(name: str, *args, copy: bool = True, dialect: DialectType = None, **kwargs) -> Func:
    +6984    """
    +6985    Returns a Func expression.
    +6986
    +6987    Examples:
    +6988        >>> func("abs", 5).sql()
    +6989        'ABS(5)'
    +6990
    +6991        >>> func("cast", this=5, to=DataType.build("DOUBLE")).sql()
    +6992        'CAST(5 AS DOUBLE)'
    +6993
    +6994    Args:
    +6995        name: the name of the function to build.
    +6996        args: the args used to instantiate the function of interest.
    +6997        copy: whether or not to copy the argument expressions.
    +6998        dialect: the source dialect.
    +6999        kwargs: the kwargs used to instantiate the function of interest.
    +7000
    +7001    Note:
    +7002        The arguments `args` and `kwargs` are mutually exclusive.
    +7003
    +7004    Returns:
    +7005        An instance of the function of interest, or an anonymous function, if `name` doesn't
    +7006        correspond to an existing `sqlglot.expressions.Func` class.
    +7007    """
    +7008    if args and kwargs:
    +7009        raise ValueError("Can't use both args and kwargs to instantiate a function.")
    +7010
    +7011    from sqlglot.dialects.dialect import Dialect
    +7012
    +7013    dialect = Dialect.get_or_raise(dialect)
    +7014
    +7015    converted: t.List[Expression] = [maybe_parse(arg, dialect=dialect, copy=copy) for arg in args]
    +7016    kwargs = {key: maybe_parse(value, dialect=dialect, copy=copy) for key, value in kwargs.items()}
    +7017
    +7018    constructor = dialect.parser_class.FUNCTIONS.get(name.upper())
    +7019    if constructor:
    +7020        if converted:
    +7021            if "dialect" in constructor.__code__.co_varnames:
    +7022                function = constructor(converted, dialect=dialect)
    +7023            else:
    +7024                function = constructor(converted)
    +7025        elif constructor.__name__ == "from_arg_list":
    +7026            function = constructor.__self__(**kwargs)  # type: ignore
    +7027        else:
    +7028            constructor = FUNCTION_BY_NAME.get(name.upper())
    +7029            if constructor:
    +7030                function = constructor(**kwargs)
    +7031            else:
    +7032                raise ValueError(
    +7033                    f"Unable to convert '{name}' into a Func. Either manually construct "
    +7034                    "the Func expression of interest or parse the function call."
    +7035                )
    +7036    else:
    +7037        kwargs = kwargs or {"expressions": converted}
    +7038        function = Anonymous(this=name, **kwargs)
    +7039
    +7040    for error_message in function.error_messages(converted):
    +7041        raise ValueError(error_message)
    +7042
    +7043    return function
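To illustrate the two branches above: a name the dialect's parser knows builds the corresponding `Func` subclass, while any other name falls back to `Anonymous`. A short sketch (the UDF name is made up):

    from sqlglot.expressions import func

    print(func("abs", 5).sql())       # ABS(5), built as exp.Abs
    f = func("my_udf", 1, "x")        # unknown name -> generic anonymous call
    print(type(f).__name__)           # Anonymous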
     
    @@ -91021,25 +91023,25 @@ True: Always quote.
    -
    7045def case(
    -7046    expression: t.Optional[ExpOrStr] = None,
    -7047    **opts,
    -7048) -> Case:
    -7049    """
    -7050    Initialize a CASE statement.
    -7051
    -7052    Example:
    -7053        case().when("a = 1", "foo").else_("bar")
    -7054
    -7055    Args:
    -7056        expression: Optionally, the input expression (not all dialects support this)
    -7057        **opts: Extra keyword arguments for parsing `expression`
    -7058    """
    -7059    if expression is not None:
    -7060        this = maybe_parse(expression, **opts)
    -7061    else:
    -7062        this = None
    -7063    return Case(this=this, ifs=[])
    +            
    7046def case(
    +7047    expression: t.Optional[ExpOrStr] = None,
    +7048    **opts,
    +7049) -> Case:
    +7050    """
    +7051    Initialize a CASE statement.
    +7052
    +7053    Example:
    +7054        case().when("a = 1", "foo").else_("bar")
    +7055
    +7056    Args:
    +7057        expression: Optionally, the input expression (not all dialects support this)
    +7058        **opts: Extra keyword arguments for parsing `expression`
    +7059    """
    +7060    if expression is not None:
    +7061        this = maybe_parse(expression, **opts)
    +7062    else:
    +7063        this = None
    +7064    return Case(this=this, ifs=[])
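A runnable variant of the docstring example above, with the THEN/ELSE values quoted so they render as string literals rather than columns:

    from sqlglot.expressions import case

    stmt = case().when("a = 1", "'foo'").else_("'bar'")
    print(stmt.sql())  # CASE WHEN a = 1 THEN 'foo' ELSE 'bar' END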
     
    @@ -91072,25 +91074,25 @@ True: Always quote.
    -
    7066def cast_unless(
    -7067    expression: ExpOrStr,
    -7068    to: DATA_TYPE,
    -7069    *types: DATA_TYPE,
    -7070    **opts: t.Any,
    -7071) -> Expression | Cast:
    -7072    """
    -7073    Cast an expression to a data type unless it is a specified type.
    -7074
    -7075    Args:
    -7076        expression: The expression to cast.
    -7077        to: The data type to cast to.
-7078        *types: The types to exclude from casting.
    -7079        **opts: Extra keyword arguments for parsing `expression`
    -7080    """
    -7081    expr = maybe_parse(expression, **opts)
    -7082    if expr.is_type(*types):
    -7083        return expr
    -7084    return cast(expr, to, **opts)
    +            
    7067def cast_unless(
    +7068    expression: ExpOrStr,
    +7069    to: DATA_TYPE,
    +7070    *types: DATA_TYPE,
    +7071    **opts: t.Any,
    +7072) -> Expression | Cast:
    +7073    """
    +7074    Cast an expression to a data type unless it is a specified type.
    +7075
    +7076    Args:
    +7077        expression: The expression to cast.
    +7078        to: The data type to cast to.
+7079        *types: The types to exclude from casting.
    +7080        **opts: Extra keyword arguments for parsing `expression`
    +7081    """
    +7082    expr = maybe_parse(expression, **opts)
    +7083    if expr.is_type(*types):
    +7084        return expr
    +7085    return cast(expr, to, **opts)
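A sketch of the short-circuit documented above: expressions already of an excluded type come back unchanged, everything else is wrapped in a CAST (column names are made up):

    from sqlglot import expressions as exp

    already_int = exp.cast(exp.column("x"), "int")
    print(exp.cast_unless(already_int, "text", "int").sql())     # CAST(x AS INT)  (unchanged)
    print(exp.cast_unless(exp.column("y"), "text", "int").sql()) # CAST(y AS TEXT)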
     
    @@ -91119,31 +91121,31 @@ True: Always quote.
    -
    7087def array(
    -7088    *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs
    -7089) -> Array:
    -7090    """
    -7091    Returns an array.
    -7092
    -7093    Examples:
    -7094        >>> array(1, 'x').sql()
    -7095        'ARRAY(1, x)'
    -7096
    -7097    Args:
    -7098        expressions: the expressions to add to the array.
    -7099        copy: whether or not to copy the argument expressions.
    -7100        dialect: the source dialect.
    -7101        kwargs: the kwargs used to instantiate the function of interest.
    -7102
    -7103    Returns:
    -7104        An array expression.
    -7105    """
    -7106    return Array(
    -7107        expressions=[
    -7108            maybe_parse(expression, copy=copy, dialect=dialect, **kwargs)
    -7109            for expression in expressions
    -7110        ]
    -7111    )
    +            
    7088def array(
    +7089    *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs
    +7090) -> Array:
    +7091    """
    +7092    Returns an array.
    +7093
    +7094    Examples:
    +7095        >>> array(1, 'x').sql()
    +7096        'ARRAY(1, x)'
    +7097
    +7098    Args:
    +7099        expressions: the expressions to add to the array.
    +7100        copy: whether or not to copy the argument expressions.
    +7101        dialect: the source dialect.
    +7102        kwargs: the kwargs used to instantiate the function of interest.
    +7103
    +7104    Returns:
    +7105        An array expression.
    +7106    """
    +7107    return Array(
    +7108        expressions=[
    +7109            maybe_parse(expression, copy=copy, dialect=dialect, **kwargs)
    +7110            for expression in expressions
    +7111        ]
    +7112    )
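Each argument above is parsed individually via `maybe_parse`, so literals, column names and small SQL snippets can be mixed freely:

    from sqlglot.expressions import array

    print(array(1, "col", "'text'").sql())  # ARRAY(1, col, 'text')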
     
    @@ -91188,31 +91190,31 @@ True: Always quote.
    -
    7114def tuple_(
    -7115    *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs
    -7116) -> Tuple:
    -7117    """
-7118    Returns a tuple.
    -7119
    -7120    Examples:
    -7121        >>> tuple_(1, 'x').sql()
    -7122        '(1, x)'
    -7123
    -7124    Args:
    -7125        expressions: the expressions to add to the tuple.
    -7126        copy: whether or not to copy the argument expressions.
    -7127        dialect: the source dialect.
    -7128        kwargs: the kwargs used to instantiate the function of interest.
    -7129
    -7130    Returns:
    -7131        A tuple expression.
    -7132    """
    -7133    return Tuple(
    -7134        expressions=[
    -7135            maybe_parse(expression, copy=copy, dialect=dialect, **kwargs)
    -7136            for expression in expressions
    -7137        ]
    -7138    )
    +            
    7115def tuple_(
    +7116    *expressions: ExpOrStr, copy: bool = True, dialect: DialectType = None, **kwargs
    +7117) -> Tuple:
    +7118    """
+7119    Returns a tuple.
    +7120
    +7121    Examples:
    +7122        >>> tuple_(1, 'x').sql()
    +7123        '(1, x)'
    +7124
    +7125    Args:
    +7126        expressions: the expressions to add to the tuple.
    +7127        copy: whether or not to copy the argument expressions.
    +7128        dialect: the source dialect.
    +7129        kwargs: the kwargs used to instantiate the function of interest.
    +7130
    +7131    Returns:
    +7132        A tuple expression.
    +7133    """
    +7134    return Tuple(
    +7135        expressions=[
    +7136            maybe_parse(expression, copy=copy, dialect=dialect, **kwargs)
    +7137            for expression in expressions
    +7138        ]
    +7139    )
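The companion builder to `array` above, following the same parsing rules for its arguments:

    from sqlglot.expressions import tuple_

    print(tuple_(1, "col", "'x'").sql())  # (1, col, 'x')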
     
    @@ -91257,11 +91259,11 @@ True: Always quote.
    -
    7141def true() -> Boolean:
    -7142    """
    -7143    Returns a true Boolean expression.
    -7144    """
    -7145    return Boolean(this=True)
    +            
    7142def true() -> Boolean:
    +7143    """
    +7144    Returns a true Boolean expression.
    +7145    """
    +7146    return Boolean(this=True)
     
    @@ -91281,11 +91283,11 @@ True: Always quote.
    -
    7148def false() -> Boolean:
    -7149    """
    -7150    Returns a false Boolean expression.
    -7151    """
    -7152    return Boolean(this=False)
    +            
    7149def false() -> Boolean:
    +7150    """
    +7151    Returns a false Boolean expression.
    +7152    """
    +7153    return Boolean(this=False)
     
    @@ -91305,11 +91307,11 @@ True: Always quote.
    -
    7155def null() -> Null:
    -7156    """
    -7157    Returns a Null expression.
    -7158    """
    -7159    return Null()
    +            
    7156def null() -> Null:
    +7157    """
    +7158    Returns a Null expression.
    +7159    """
    +7160    return Null()
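These three constructors are trivial but convenient when assembling expression trees by hand:

    from sqlglot import expressions as exp

    print(exp.true().sql(), exp.false().sql(), exp.null().sql())  # TRUE FALSE NULL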
     
    diff --git a/docs/sqlglot/helper.html b/docs/sqlglot/helper.html index b020d89..115c17f 100644 --- a/docs/sqlglot/helper.html +++ b/docs/sqlglot/helper.html @@ -126,6 +126,15 @@
  • is_date_unit
  • +
  • + SingleValuedMapping + + +
  • @@ -157,7 +166,7 @@
    6import re 7import sys 8import typing as t - 9from collections.abc import Collection + 9from collections.abc import Collection, Set 10from contextlib import contextmanager 11from copy import copy 12from enum import Enum @@ -647,6 +656,34 @@ 496 497def is_date_unit(expression: t.Optional[exp.Expression]) -> bool: 498 return expression is not None and expression.name.lower() in DATE_UNITS +499 +500 +501K = t.TypeVar("K") +502V = t.TypeVar("V") +503 +504 +505class SingleValuedMapping(t.Mapping[K, V]): +506 """ +507 Mapping where all keys return the same value. +508 +509 This rigamarole is meant to avoid copying keys, which was originally intended +510 as an optimization while qualifying columns for tables with lots of columns. +511 """ +512 +513 def __init__(self, keys: t.Collection[K], value: V): +514 self._keys = keys if isinstance(keys, Set) else set(keys) +515 self._value = value +516 +517 def __getitem__(self, key: K) -> V: +518 if key in self._keys: +519 return self._value +520 raise KeyError(key) +521 +522 def __len__(self) -> int: +523 return len(self._keys) +524 +525 def __iter__(self) -> t.Iterator[K]: +526 return iter(self._keys)
    @@ -1808,7 +1845,7 @@ belong to some totally-ordered set.

    DATE_UNITS = -{'quarter', 'month', 'day', 'year', 'week', 'year_month'} +{'month', 'day', 'year_month', 'quarter', 'week', 'year'}
    @@ -1835,6 +1872,81 @@ belong to some totally-ordered set.

    +
    +
    + +
    + + class + SingleValuedMapping(typing.Mapping[~K, ~V]): + + + +
    + +
    506class SingleValuedMapping(t.Mapping[K, V]):
    +507    """
    +508    Mapping where all keys return the same value.
    +509
    +510    This rigamarole is meant to avoid copying keys, which was originally intended
    +511    as an optimization while qualifying columns for tables with lots of columns.
    +512    """
    +513
    +514    def __init__(self, keys: t.Collection[K], value: V):
    +515        self._keys = keys if isinstance(keys, Set) else set(keys)
    +516        self._value = value
    +517
    +518    def __getitem__(self, key: K) -> V:
    +519        if key in self._keys:
    +520            return self._value
    +521        raise KeyError(key)
    +522
    +523    def __len__(self) -> int:
    +524        return len(self._keys)
    +525
    +526    def __iter__(self) -> t.Iterator[K]:
    +527        return iter(self._keys)
    +
    + + +

    Mapping where all keys return the same value.

    + +

    This rigamarole is meant to avoid copying keys, which was originally intended +as an optimization while qualifying columns for tables with lots of columns.
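A small sketch of how this mapping behaves (the column names are made up). Because the keys are stored as a set, iteration order is not the insertion order:

    from sqlglot.helper import SingleValuedMapping

    cols = SingleValuedMapping(["id", "name", "created_at"], "UNKNOWN")
    print(cols["name"])   # UNKNOWN
    print(len(cols))      # 3
    print(sorted(cols))   # ['created_at', 'id', 'name']
    # cols["missing"] raises KeyError, like a regular dict.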

    +
    + + +
    + +
    + + SingleValuedMapping(keys: Collection[~K], value: ~V) + + + +
    + +
    514    def __init__(self, keys: t.Collection[K], value: V):
    +515        self._keys = keys if isinstance(keys, Set) else set(keys)
    +516        self._value = value
    +
    + + + + +
    +
    +
    Inherited Members
    +
    +
    collections.abc.Mapping
    +
    get
    +
    keys
    +
    items
    +
    values
    + +
    +
    +