sqlglot.dialects.teradata

from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens
from sqlglot.dialects.dialect import (
    Dialect,
    format_time_lambda,
    max_or_greatest,
    min_or_least,
)
from sqlglot.tokens import TokenType


class Teradata(Dialect):
    class Tokenizer(tokens.Tokenizer):
        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.SMALLINT,
            "SEL": TokenType.SELECT,
            "INS": TokenType.INSERT,
            "MOD": TokenType.MOD,
            "LT": TokenType.LT,
            "LE": TokenType.LTE,
            "GT": TokenType.GT,
            "GE": TokenType.GTE,
            "^=": TokenType.NEQ,
            "NE": TokenType.NEQ,
            "NOT=": TokenType.NEQ,
            "ST_GEOMETRY": TokenType.GEOMETRY,
        }

        # teradata does not support % for modulus
        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
        SINGLE_TOKENS.pop("%")

    class Parser(parser.Parser):
        CHARSET_TRANSLATORS = {
            "GRAPHIC_TO_KANJISJIS",
            "GRAPHIC_TO_LATIN",
            "GRAPHIC_TO_UNICODE",
            "GRAPHIC_TO_UNICODE_PadSpace",
            "KANJI1_KanjiEBCDIC_TO_UNICODE",
            "KANJI1_KanjiEUC_TO_UNICODE",
            "KANJI1_KANJISJIS_TO_UNICODE",
            "KANJI1_SBC_TO_UNICODE",
            "KANJISJIS_TO_GRAPHIC",
            "KANJISJIS_TO_LATIN",
            "KANJISJIS_TO_UNICODE",
            "LATIN_TO_GRAPHIC",
            "LATIN_TO_KANJISJIS",
            "LATIN_TO_UNICODE",
            "LOCALE_TO_UNICODE",
            "UNICODE_TO_GRAPHIC",
            "UNICODE_TO_GRAPHIC_PadGraphic",
            "UNICODE_TO_GRAPHIC_VarGraphic",
            "UNICODE_TO_KANJI1_KanjiEBCDIC",
            "UNICODE_TO_KANJI1_KanjiEUC",
            "UNICODE_TO_KANJI1_KANJISJIS",
            "UNICODE_TO_KANJI1_SBC",
            "UNICODE_TO_KANJISJIS",
            "UNICODE_TO_LATIN",
            "UNICODE_TO_LOCALE",
            "UNICODE_TO_UNICODE_FoldSpace",
            "UNICODE_TO_UNICODE_Fullwidth",
            "UNICODE_TO_UNICODE_Halfwidth",
            "UNICODE_TO_UNICODE_NFC",
            "UNICODE_TO_UNICODE_NFD",
            "UNICODE_TO_UNICODE_NFKC",
            "UNICODE_TO_UNICODE_NFKD",
        }

        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
        FUNC_TOKENS.remove(TokenType.REPLACE)

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,  # type: ignore
            TokenType.REPLACE: lambda self: self._parse_create(),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,  # type: ignore
            "RANGE_N": lambda self: self._parse_rangen(),
            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
        }

        def _parse_translate(self, strict: bool) -> exp.Expression:
            this = self._parse_conjunction()

            if not self._match(TokenType.USING):
                self.raise_error("Expected USING in TRANSLATE")

            if self._match_texts(self.CHARSET_TRANSLATORS):
                charset_split = self._prev.text.split("_TO_")
                to = self.expression(exp.CharacterSet, this=charset_split[1])
            else:
                self.raise_error("Expected a character set translator after USING in TRANSLATE")

            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)

        # FROM before SET in Teradata UPDATE syntax
        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
        def _parse_update(self) -> exp.Expression:
            return self.expression(
                exp.Update,
                **{  # type: ignore
                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
                    "from": self._parse_from(),
                    "expressions": self._match(TokenType.SET)
                    and self._parse_csv(self._parse_equality),
                    "where": self._parse_where(),
                },
            )

        def _parse_rangen(self):
            this = self._parse_id_var()
            self._match(TokenType.BETWEEN)

            expressions = self._parse_csv(self._parse_conjunction)
            each = self._match_text_seq("EACH") and self._parse_conjunction()

            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)

        def _parse_cast(self, strict: bool) -> exp.Expression:
            cast = t.cast(exp.Cast, super()._parse_cast(strict))
            if cast.to.this == exp.DataType.Type.DATE and self._match(TokenType.FORMAT):
                return format_time_lambda(exp.TimeToStr, "teradata")(
                    [cast.this, self._parse_string()]
                )
            return cast

    class Generator(generator.Generator):
        JOIN_HINTS = False
        TABLE_HINTS = False

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,  # type: ignore
            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
            exp.PartitionedByProperty: exp.Properties.Location.POST_INDEX,
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.TimeToStr: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
        }

        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
            return f"PARTITION BY {self.sql(expression, 'this')}"

        # FROM before SET in Teradata UPDATE syntax
        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
        def update_sql(self, expression: exp.Update) -> str:
            this = self.sql(expression, "this")
            from_sql = self.sql(expression, "from")
            set_sql = self.expressions(expression, flat=True)
            where_sql = self.sql(expression, "where")
            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
            return self.prepend_ctes(expression, sql)

        def mod_sql(self, expression: exp.Mod) -> str:
            return self.binary(expression, "MOD")

        def datatype_sql(self, expression: exp.DataType) -> str:
            type_sql = super().datatype_sql(expression)
            prefix_sql = expression.args.get("prefix")
            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql

        def rangen_sql(self, expression: exp.RangeN) -> str:
            this = self.sql(expression, "this")
            expressions_sql = self.expressions(expression)
            each_sql = self.sql(expression, "each")
            each_sql = f" EACH {each_sql}" if each_sql else ""

            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
class Teradata(sqlglot.dialects.dialect.Dialect):
class Teradata.Tokenizer(sqlglot.tokens.Tokenizer):
class Teradata.Parser(sqlglot.parser.Parser):

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: the desired error level. Default: ErrorLevel.RAISE
  • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
  • alias_post_tablesample: Whether the table alias comes after the tablesample clause. Default: False
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
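This parser is rarely constructed by hand; it is selected via the dialect name. A small, hedged sketch of how the Teradata-specific parsing above surfaces through the public API (expected values are indicative):

    import sqlglot
    from sqlglot import exp

    # TRANSLATE(<expr> USING <charset translator>) goes through _parse_translate:
    # the part after "_TO_" becomes the target character set of a Cast
    # (or TryCast when STRICT_CAST is disabled).
    tree = sqlglot.parse_one("TRANSLATE(col USING LATIN_TO_UNICODE)", read="teradata")
    print(type(tree).__name__)   # expected: Cast
    print(tree.args["to"].name)  # expected: UNICODE

    # SEL/INS shorthands are resolved by the tokenizer before parsing.
    print(sqlglot.parse_one("SEL 1", read="teradata").sql())
    # expected: something like "SELECT 1"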
class Teradata.Generator(sqlglot.generator.Generator):

Generator interprets the given syntax tree and produces a SQL string as an output.

Arguments:
  • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
  • time_trie (trie): a trie of the time_mapping keys
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • identify (bool | str): 'always': always quote identifiers; 'safe': quote identifiers only when they contain no uppercase characters; True defaults to 'always'.
  • normalize (bool): if set to True, all identifiers will be lowercased
  • string_escape (str): specifies a string escape character. Default: '.
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • pad (int): determines padding in a formatted string. Default: 2.
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • unnest_column_only (bool): if set to True, UNNEST table aliases are treated as column aliases only
  • normalize_functions (str): normalize function names to "upper", "lower", or None (leave unchanged). Default: "upper"
  • alias_post_tablesample (bool): whether the table alias comes after the tablesample clause. Default: False
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
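The generator is usually reached through Expression.sql(dialect="teradata") or sqlglot.transpile(..., write="teradata"), and generator keyword arguments such as pretty can be forwarded through those calls. A brief sketch (expected output is indicative):

    import sqlglot

    # GEOMETRY is emitted as Teradata's ST_GEOMETRY per TYPE_MAPPING.
    print(sqlglot.transpile("CREATE TABLE t (g GEOMETRY)", write="teradata")[0])
    # expected: something like "CREATE TABLE t (g ST_GEOMETRY)"

    # Generator options are passed as keyword arguments, e.g. pretty-printing.
    print(sqlglot.transpile("SELECT a, b FROM t WHERE a > 1", write="teradata", pretty=True)[0])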
def partitionedbyproperty_sql(self, expression: sqlglot.expressions.PartitionedByProperty) -> str:
def update_sql(self, expression: sqlglot.expressions.Update) -> str:
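A hedged illustration of the FROM-before-SET reordering this method produces (exact spacing may differ):

    import sqlglot

    # A standard UPDATE ... SET ... FROM statement is re-emitted with FROM ahead of SET.
    sql = "UPDATE t SET x = s.x FROM s WHERE t.id = s.id"
    print(sqlglot.transpile(sql, write="teradata")[0])
    # expected: something like "UPDATE t FROM s SET x = s.x WHERE t.id = s.id"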
def mod_sql(self, expression: sqlglot.expressions.Mod) -> str:
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
def rangen_sql(self, expression: sqlglot.expressions.RangeN) -> str:
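A small round-trip sketch for RANGE_N (assuming the expression parses as shown in _parse_rangen above; output is indicative):

    import sqlglot

    # RANGE_N(col BETWEEN <bounds> EACH <size>) round-trips through
    # _parse_rangen and rangen_sql.
    expr = sqlglot.parse_one("RANGE_N(x BETWEEN 1 AND 10 EACH 2)", read="teradata")
    print(expr.sql(dialect="teradata"))
    # expected: something like "RANGE_N(x BETWEEN 1 AND 10 EACH 2)"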
Inherited Members
sqlglot.generator.Generator
Generator
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
describe_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
afterjournalproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
select_sql
schema_sql
star_sql
structkwarg_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
window_spec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
extract_sql
trim_sql
concat_sql
check_sql
foreignkey_sql
primarykey_sql
unique_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql