Supports BigQuery Standard SQL.
1"""Supports BigQuery Standard SQL.""" 2 3from __future__ import annotations 4 5import re 6import typing as t 7 8from sqlglot import exp, generator, parser, tokens, transforms 9from sqlglot.dialects.dialect import ( 10 Dialect, 11 datestrtodate_sql, 12 inline_array_sql, 13 max_or_greatest, 14 min_or_least, 15 no_ilike_sql, 16 parse_date_delta_with_interval, 17 rename_func, 18 timestrtotime_sql, 19 ts_or_ds_to_date_sql, 20) 21from sqlglot.helper import seq_get 22from sqlglot.tokens import TokenType 23 24E = t.TypeVar("E", bound=exp.Expression) 25 26 27def _date_add_sql( 28 data_type: str, kind: str 29) -> t.Callable[[generator.Generator, exp.Expression], str]: 30 def func(self, expression): 31 this = self.sql(expression, "this") 32 unit = expression.args.get("unit") 33 unit = exp.var(unit.name.upper() if unit else "DAY") 34 interval = exp.Interval(this=expression.expression, unit=unit) 35 return f"{data_type}_{kind}({this}, {self.sql(interval)})" 36 37 return func 38 39 40def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: 41 if not isinstance(expression.unnest().parent, exp.From): 42 expression = t.cast(exp.Values, transforms.remove_precision_parameterized_types(expression)) 43 return self.values_sql(expression) 44 rows = [tuple_exp.expressions for tuple_exp in expression.find_all(exp.Tuple)] 45 structs = [] 46 for row in rows: 47 aliases = [ 48 exp.alias_(value, column_name) 49 for value, column_name in zip(row, expression.args["alias"].args["columns"]) 50 ] 51 structs.append(exp.Struct(expressions=aliases)) 52 unnest_exp = exp.Unnest(expressions=[exp.Array(expressions=structs)]) 53 return self.unnest_sql(unnest_exp) 54 55 56def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: 57 this = expression.this 58 if isinstance(this, exp.Schema): 59 this = f"{this.this} <{self.expressions(this)}>" 60 else: 61 this = self.sql(this) 62 return f"RETURNS {this}" 63 64 65def _create_sql(self: generator.Generator, expression: exp.Create) -> str: 66 kind = expression.args["kind"] 67 returns = expression.find(exp.ReturnsProperty) 68 if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): 69 expression = expression.copy() 70 expression.set("kind", "TABLE FUNCTION") 71 if isinstance( 72 expression.expression, 73 ( 74 exp.Subquery, 75 exp.Literal, 76 ), 77 ): 78 expression.set("expression", expression.expression.this) 79 80 return self.create_sql(expression) 81 82 return self.create_sql(expression) 83 84 85def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: 86 """Remove references to unnest table aliases since bigquery doesn't allow them. 87 88 These are added by the optimizer's qualify_column step. 
89 """ 90 if isinstance(expression, exp.Select): 91 unnests = { 92 unnest.alias 93 for unnest in expression.args.get("from", exp.From(expressions=[])).expressions 94 if isinstance(unnest, exp.Unnest) and unnest.alias 95 } 96 97 if unnests: 98 expression = expression.copy() 99 100 for select in expression.expressions: 101 for column in select.find_all(exp.Column): 102 if column.table in unnests: 103 column.set("table", None) 104 105 return expression 106 107 108class BigQuery(Dialect): 109 unnest_column_only = True 110 time_mapping = { 111 "%M": "%-M", 112 "%d": "%-d", 113 "%m": "%-m", 114 "%y": "%-y", 115 "%H": "%-H", 116 "%I": "%-I", 117 "%S": "%-S", 118 "%j": "%-j", 119 } 120 121 class Tokenizer(tokens.Tokenizer): 122 QUOTES = [ 123 (prefix + quote, quote) if prefix else quote 124 for quote in ["'", '"', '"""', "'''"] 125 for prefix in ["", "r", "R"] 126 ] 127 COMMENTS = ["--", "#", ("/*", "*/")] 128 IDENTIFIERS = ["`"] 129 STRING_ESCAPES = ["\\"] 130 HEX_STRINGS = [("0x", ""), ("0X", "")] 131 132 KEYWORDS = { 133 **tokens.Tokenizer.KEYWORDS, 134 "ANY TYPE": TokenType.VARIANT, 135 "BEGIN": TokenType.COMMAND, 136 "BEGIN TRANSACTION": TokenType.BEGIN, 137 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 138 "DECLARE": TokenType.COMMAND, 139 "GEOGRAPHY": TokenType.GEOGRAPHY, 140 "FLOAT64": TokenType.DOUBLE, 141 "INT64": TokenType.BIGINT, 142 "NOT DETERMINISTIC": TokenType.VOLATILE, 143 "UNKNOWN": TokenType.NULL, 144 } 145 KEYWORDS.pop("DIV") 146 147 class Parser(parser.Parser): 148 PREFIXED_PIVOT_COLUMNS = True 149 150 LOG_BASE_FIRST = False 151 LOG_DEFAULTS_TO_LN = True 152 153 FUNCTIONS = { 154 **parser.Parser.FUNCTIONS, # type: ignore 155 "DATE_TRUNC": lambda args: exp.DateTrunc( 156 unit=exp.Literal.string(seq_get(args, 1).name), # type: ignore 157 this=seq_get(args, 0), 158 ), 159 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 160 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 161 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), 162 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 163 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 164 this=seq_get(args, 0), 165 expression=seq_get(args, 1), 166 position=seq_get(args, 2), 167 occurrence=seq_get(args, 3), 168 group=exp.Literal.number(1) 169 if re.compile(str(seq_get(args, 1))).groups == 1 170 else None, 171 ), 172 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 173 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 174 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 175 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 176 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 177 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 178 "PARSE_TIMESTAMP": lambda args: exp.StrToTime( 179 this=seq_get(args, 1), format=seq_get(args, 0) 180 ), 181 } 182 183 FUNCTION_PARSERS = { 184 **parser.Parser.FUNCTION_PARSERS, # type: ignore 185 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 186 } 187 FUNCTION_PARSERS.pop("TRIM") 188 189 NO_PAREN_FUNCTIONS = { 190 **parser.Parser.NO_PAREN_FUNCTIONS, # type: ignore 191 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 192 } 193 194 NESTED_TYPE_TOKENS = { 195 *parser.Parser.NESTED_TYPE_TOKENS, # type: ignore 196 TokenType.TABLE, 197 } 198 199 ID_VAR_TOKENS = { 200 *parser.Parser.ID_VAR_TOKENS, # type: ignore 201 TokenType.VALUES, 202 } 203 204 PROPERTY_PARSERS = { 205 **parser.Parser.PROPERTY_PARSERS, # type: ignore 206 "NOT 
DETERMINISTIC": lambda self: self.expression( 207 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 208 ), 209 } 210 211 class Generator(generator.Generator): 212 EXPLICIT_UNION = True 213 INTERVAL_ALLOWS_PLURAL_FORM = False 214 JOIN_HINTS = False 215 TABLE_HINTS = False 216 LIMIT_FETCH = "LIMIT" 217 218 TRANSFORMS = { 219 **generator.Generator.TRANSFORMS, # type: ignore 220 **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES, # type: ignore 221 exp.ArraySize: rename_func("ARRAY_LENGTH"), 222 exp.AtTimeZone: lambda self, e: self.func( 223 "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone")) 224 ), 225 exp.DateAdd: _date_add_sql("DATE", "ADD"), 226 exp.DateSub: _date_add_sql("DATE", "SUB"), 227 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 228 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 229 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", 230 exp.DateStrToDate: datestrtodate_sql, 231 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 232 exp.GroupConcat: rename_func("STRING_AGG"), 233 exp.ILike: no_ilike_sql, 234 exp.IntDiv: rename_func("DIV"), 235 exp.Max: max_or_greatest, 236 exp.Min: min_or_least, 237 exp.Select: transforms.preprocess([_unqualify_unnest]), 238 exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", 239 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 240 exp.TimeSub: _date_add_sql("TIME", "SUB"), 241 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 242 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 243 exp.TimeStrToTime: timestrtotime_sql, 244 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 245 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 246 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 247 exp.VariancePop: rename_func("VAR_POP"), 248 exp.Values: _derived_table_values_to_unnest, 249 exp.ReturnsProperty: _returnsproperty_sql, 250 exp.Create: _create_sql, 251 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), 252 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 253 if e.name == "IMMUTABLE" 254 else "NOT DETERMINISTIC", 255 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 256 } 257 258 TYPE_MAPPING = { 259 **generator.Generator.TYPE_MAPPING, # type: ignore 260 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", 261 exp.DataType.Type.BIGINT: "INT64", 262 exp.DataType.Type.BOOLEAN: "BOOL", 263 exp.DataType.Type.CHAR: "STRING", 264 exp.DataType.Type.DECIMAL: "NUMERIC", 265 exp.DataType.Type.DOUBLE: "FLOAT64", 266 exp.DataType.Type.FLOAT: "FLOAT64", 267 exp.DataType.Type.INT: "INT64", 268 exp.DataType.Type.NCHAR: "STRING", 269 exp.DataType.Type.NVARCHAR: "STRING", 270 exp.DataType.Type.SMALLINT: "INT64", 271 exp.DataType.Type.TEXT: "STRING", 272 exp.DataType.Type.TIMESTAMP: "DATETIME", 273 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 274 exp.DataType.Type.TINYINT: "INT64", 275 exp.DataType.Type.VARCHAR: "STRING", 276 exp.DataType.Type.VARIANT: "ANY TYPE", 277 } 278 279 PROPERTIES_LOCATION = { 280 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 281 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 282 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 283 } 284 285 def array_sql(self, expression: exp.Array) -> str: 286 first_arg = seq_get(expression.expressions, 0) 287 if isinstance(first_arg, exp.Subqueryable): 288 return f"ARRAY{self.wrap(self.sql(first_arg))}" 289 290 return inline_array_sql(self, 
expression) 291 292 def transaction_sql(self, *_) -> str: 293 return "BEGIN TRANSACTION" 294 295 def commit_sql(self, *_) -> str: 296 return "COMMIT TRANSACTION" 297 298 def rollback_sql(self, *_) -> str: 299 return "ROLLBACK TRANSACTION" 300 301 def in_unnest_op(self, expression: exp.Unnest) -> str: 302 return self.sql(expression) 303 304 def except_op(self, expression: exp.Except) -> str: 305 if not expression.args.get("distinct", False): 306 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 307 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 308 309 def intersect_op(self, expression: exp.Intersect) -> str: 310 if not expression.args.get("distinct", False): 311 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 312 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
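As a quick, illustrative sketch (the table and column names are invented, and exact output may vary across sqlglot versions), the dialect is typically exercised through sqlglot's public transpile API:

import sqlglot

# Round-trip through BigQuery: DATE_ADD's INTERVAL argument is parsed by
# parse_date_delta_with_interval into exp.DateAdd, and _date_add_sql
# renders it back as DATE_ADD(d, INTERVAL 1 DAY).
sqlglot.transpile(
    "SELECT DATE_ADD(d, INTERVAL 1 DAY) FROM t",
    read="bigquery",
    write="bigquery",
)[0]

# Writing BigQuery from another dialect: a VALUES derived table is
# rewritten by _derived_table_values_to_unnest into UNNEST([STRUCT(...)]),
# since BigQuery does not support VALUES in a FROM clause.
sqlglot.transpile(
    "SELECT a, b FROM (VALUES (1, 'x')) AS t(a, b)",
    read="postgres",
    write="bigquery",
)[0]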
class BigQuery.Parser(sqlglot.parser.Parser)

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
- alias_post_tablesample: If the table alias comes after tablesample. Default: False
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
- null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
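As a rough sketch of how these arguments reach BigQuery.Parser (the query is invented; sqlglot.parse_one forwards keyword arguments such as error_level to the dialect's parser):

import sqlglot
from sqlglot.errors import ErrorLevel

# PARSE_TIMESTAMP(format, value) is normalized into exp.StrToTime by the
# FUNCTIONS override in the source above; note the swapped argument order.
expression = sqlglot.parse_one(
    "SELECT PARSE_TIMESTAMP('%Y-%m-%d', s) FROM t",
    read="bigquery",
    error_level=ErrorLevel.WARN,
)
assert expression.find(sqlglot.exp.StrToTime) is not None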
class BigQuery.Generator(sqlglot.generator.Generator)

Generator interprets the given syntax tree and produces a SQL string as output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- identify (bool | str): 'always': always quote; 'safe': quote identifiers if they don't contain an uppercase character; True defaults to 'always'.
- normalize (bool): if set to True, all identifiers will be lowercased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if set to True, unnest table aliases are considered only as column aliases
- normalize_functions (str): normalize function names to "upper" or "lower", or None for no normalization. Default: "upper"
- alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
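A brief sketch of these options in use (the query is illustrative; keyword arguments passed to sqlglot.transpile are forwarded to BigQuery.Generator):

import sqlglot

# VARCHAR maps to STRING via TYPE_MAPPING, and except_op emits the
# explicit DISTINCT that BigQuery requires on set operations.
print(
    sqlglot.transpile(
        "SELECT CAST(a AS VARCHAR) FROM t EXCEPT DISTINCT SELECT b FROM u",
        write="bigquery",
        pretty=True,  # forwarded to the generator
    )[0]
)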