Supports BigQuery Standard SQL.
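A minimal usage sketch (the duckdb target below is only an illustrative counterpart dialect; any other supported dialect works the same way): pass read="bigquery" or write="bigquery" to sqlglot's top-level helpers to route parsing and generation through this dialect.

import sqlglot

# Transpile BigQuery SQL to another dialect; transpile() returns a list of SQL strings.
print(sqlglot.transpile("SELECT CAST(x AS INT64) FROM t", read="bigquery", write="duckdb")[0])

# Parse into an expression tree and generate BigQuery SQL back out of it.
expression = sqlglot.parse_one("SELECT * FROM t", read="bigquery")
print(expression.sql(dialect="bigquery"))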
"""Supports BigQuery Standard SQL."""

from __future__ import annotations

import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    datestrtodate_sql,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

E = t.TypeVar("E", bound=exp.Expression)


def _date_add(expression_class: t.Type[E]) -> t.Callable[[t.Sequence], E]:
    def func(args):
        interval = seq_get(args, 1)
        return expression_class(
            this=seq_get(args, 0),
            expression=interval.this,
            unit=interval.args.get("unit"),
        )

    return func


def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    def func(self, expression):
        this = self.sql(expression, "this")
        unit = expression.args.get("unit")
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression, unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func


def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
    if not isinstance(expression.unnest().parent, exp.From):
        expression = t.cast(exp.Values, transforms.remove_precision_parameterized_types(expression))
        return self.values_sql(expression)
    rows = [tuple_exp.expressions for tuple_exp in expression.find_all(exp.Tuple)]
    structs = []
    for row in rows:
        aliases = [
            exp.alias_(value, column_name)
            for value, column_name in zip(row, expression.args["alias"].args["columns"])
        ]
        structs.append(exp.Struct(expressions=aliases))
    unnest_exp = exp.Unnest(expressions=[exp.Array(expressions=structs)])
    return self.unnest_sql(unnest_exp)


def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)
    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")
        if isinstance(
            expression.expression,
            (
                exp.Subquery,
                exp.Literal,
            ),
        ):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    if isinstance(expression, exp.Select):
        unnests = {
            unnest.alias
            for unnest in expression.args.get("from", exp.From(expressions=[])).expressions
            if isinstance(unnest, exp.Unnest) and unnest.alias
        }

        if unnests:
            expression = expression.copy()

            for select in expression.expressions:
                for column in select.find_all(exp.Column):
                    if column.table in unnests:
                        column.set("table", None)

    return expression


class BigQuery(Dialect):
    unnest_column_only = True
    time_mapping = {
        "%M": "%-M",
        "%d": "%-d",
        "%m": "%-m",
        "%y": "%-y",
        "%H": "%-H",
        "%I": "%-I",
        "%S": "%-S",
        "%j": "%-j",
    }

    class Tokenizer(tokens.Tokenizer):
        QUOTES = [
            (prefix + quote, quote) if prefix else quote
            for quote in ["'", '"', '"""', "'''"]
            for prefix in ["", "r", "R"]
        ]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]
        HEX_STRINGS = [("0x", ""), ("0X", "")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DECLARE": TokenType.COMMAND,
            "GEOGRAPHY": TokenType.GEOGRAPHY,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,  # type: ignore
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(seq_get(args, 1).name),  # type: ignore
                this=seq_get(args, 0),
            ),
            "DATE_ADD": _date_add(exp.DateAdd),
            "DATETIME_ADD": _date_add(exp.DatetimeAdd),
            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "TIME_ADD": _date_add(exp.TimeAdd),
            "TIMESTAMP_ADD": _date_add(exp.TimestampAdd),
            "DATE_SUB": _date_add(exp.DateSub),
            "DATETIME_SUB": _date_add(exp.DatetimeSub),
            "TIME_SUB": _date_add(exp.TimeSub),
            "TIMESTAMP_SUB": _date_add(exp.TimestampSub),
            "PARSE_TIMESTAMP": lambda args: exp.StrToTime(
                this=seq_get(args, 1), format=seq_get(args, 0)
            ),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,  # type: ignore
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,  # type: ignore
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,  # type: ignore
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,  # type: ignore
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.VolatilityProperty, this=exp.Literal.string("VOLATILE")
            ),
        }

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

    class Generator(generator.Generator):
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,  # type: ignore
            **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES,  # type: ignore
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.AtTimeZone: lambda self, e: self.func(
                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
            ),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.Select: transforms.preprocess(
                [_unqualify_unnest], transforms.delegate("select_sql")
            ),
            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Values: _derived_table_values_to_unnest,
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Create: _create_sql,
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.VolatilityProperty: lambda self, e: "DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARCHAR: "STRING",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        }

        EXPLICIT_UNION = True
        LIMIT_FETCH = "LIMIT"

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
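To make the transforms above concrete, here is a rough sketch of two of them in action; the output strings are approximate and can vary between sqlglot versions.

import sqlglot

# DATE_ADD round-trips through exp.DateAdd (parsed by _date_add, generated by _date_add_sql).
sql = "SELECT DATE_ADD(d, INTERVAL 1 DAY) FROM t"
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])

# A VALUES derived table is rewritten by _derived_table_values_to_unnest into an UNNEST over
# an array of STRUCTs, roughly: SELECT * FROM UNNEST([STRUCT(1 AS x, 'a' AS y)])
print(sqlglot.transpile("SELECT * FROM (VALUES (1, 'a')) AS t(x, y)", write="bigquery")[0])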
class BigQuery.Parser(sqlglot.parser.Parser):
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: Whether the table alias comes after TABLESAMPLE. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
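A small parsing sketch under the defaults above; the assertions only illustrate how BigQuery.Parser maps dialect-specific functions onto sqlglot's generic expression nodes.

import sqlglot
from sqlglot import exp

# DATE_ADD(..., INTERVAL n unit) is parsed via _date_add into a generic exp.DateAdd node.
node = sqlglot.parse_one("SELECT DATE_ADD(d, INTERVAL 1 DAY) FROM t", read="bigquery")
assert node.find(exp.DateAdd) is not None

# REGEXP_CONTAINS maps to the generic exp.RegexpLike node.
node = sqlglot.parse_one("SELECT REGEXP_CONTAINS(s, 'a+') FROM t", read="bigquery")
assert node.find(exp.RegexpLike) is not None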
class BigQuery.Generator(sqlglot.generator.Generator):
Generator interprets the given syntax tree and produces a SQL string as output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: ' (single quote).
- quote_end (str): specifies which ending character to use to delimit quotes. Default: ' (single quote).
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: " (double quote).
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: " (double quote).
- identify (bool | str): 'always': always quote identifiers; 'safe': quote identifiers only if they contain no uppercase characters; True defaults to 'always'.
- normalize (bool): if set to True, all identifiers will be lowercased.
- string_escape (str): specifies the string escape character. Default: ' (single quote).
- identifier_escape (str): specifies the identifier escape character. Default: " (double quote).
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if True, UNNEST table aliases are treated as column aliases
- normalize_functions (str): how to normalize function names, "upper", "lower", or None. Default: "upper"
- alias_post_tablesample (bool): whether the table alias comes after TABLESAMPLE. Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
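A short generation sketch: options such as pretty and identify above are forwarded through sql() or transpile(), and the output shown in the comment is approximate.

import sqlglot

# TYPE_MAPPING plus REMOVE_PRECISION_PARAMETERIZED_TYPES turn VARCHAR(10) into STRING.
print(sqlglot.transpile("CAST(x AS VARCHAR(10))", write="bigquery")[0])  # roughly: CAST(x AS STRING)

# Generator options are passed as keyword arguments.
expression = sqlglot.parse_one("SELECT a, b FROM t", read="bigquery")
print(expression.sql(dialect="bigquery", pretty=True, identify=True))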