Edit on GitHub

sqlglot.dialects.teradata

  1from __future__ import annotations
  2
  3from sqlglot import exp, generator, parser, tokens, transforms
  4from sqlglot.dialects.dialect import Dialect, max_or_greatest, min_or_least
  5from sqlglot.tokens import TokenType
  6
  7
class Teradata(Dialect):
    """sqlglot dialect definition for Teradata SQL.

    Bundles the Teradata-specific :class:`Tokenizer`, :class:`Parser` and
    :class:`Generator` overrides on top of the base :class:`Dialect`.
    """

    # Teradata date/time format tokens mapped to strftime-style directives,
    # used when converting format strings to/from other dialects.
    # NOTE(review): "MM" maps to %M (minute) and "M" to %-M — confirm this is
    # intentional for Teradata formats where MM can also mean month.
    TIME_MAPPING = {
        "Y": "%Y",
        "YYYY": "%Y",
        "YY": "%y",
        "MMMM": "%B",
        "MMM": "%b",
        "DD": "%d",
        "D": "%-d",
        "HH": "%H",
        "H": "%-H",
        "MM": "%M",
        "M": "%-M",
        "SS": "%S",
        "S": "%-S",
        "SSSSSS": "%f",
        "E": "%a",
        "EE": "%a",
        "EEE": "%a",
        "EEEE": "%A",
    }

    class Tokenizer(tokens.Tokenizer):
        # Teradata accepts word-form comparison operators (LT, LE, GT, ...):
        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.SMALLINT,
            "SEL": TokenType.SELECT,  # Teradata shorthand for SELECT
            "INS": TokenType.INSERT,  # Teradata shorthand for INSERT
            "MOD": TokenType.MOD,
            "LT": TokenType.LT,
            "LE": TokenType.LTE,
            "GT": TokenType.GT,
            "GE": TokenType.GTE,
            "^=": TokenType.NEQ,
            "NE": TokenType.NEQ,
            "NOT=": TokenType.NEQ,
            "ST_GEOMETRY": TokenType.GEOMETRY,
        }

        # Teradata does not support % as a modulo operator
        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
        SINGLE_TOKENS.pop("%")

    class Parser(parser.Parser):
        # Valid translator names accepted after USING in Teradata's
        # TRANSLATE(x USING <translator>) expression; each encodes a
        # source and target character set separated by "_TO_".
        CHARSET_TRANSLATORS = {
            "GRAPHIC_TO_KANJISJIS",
            "GRAPHIC_TO_LATIN",
            "GRAPHIC_TO_UNICODE",
            "GRAPHIC_TO_UNICODE_PadSpace",
            "KANJI1_KanjiEBCDIC_TO_UNICODE",
            "KANJI1_KanjiEUC_TO_UNICODE",
            "KANJI1_KANJISJIS_TO_UNICODE",
            "KANJI1_SBC_TO_UNICODE",
            "KANJISJIS_TO_GRAPHIC",
            "KANJISJIS_TO_LATIN",
            "KANJISJIS_TO_UNICODE",
            "LATIN_TO_GRAPHIC",
            "LATIN_TO_KANJISJIS",
            "LATIN_TO_UNICODE",
            "LOCALE_TO_UNICODE",
            "UNICODE_TO_GRAPHIC",
            "UNICODE_TO_GRAPHIC_PadGraphic",
            "UNICODE_TO_GRAPHIC_VarGraphic",
            "UNICODE_TO_KANJI1_KanjiEBCDIC",
            "UNICODE_TO_KANJI1_KanjiEUC",
            "UNICODE_TO_KANJI1_KANJISJIS",
            "UNICODE_TO_KANJI1_SBC",
            "UNICODE_TO_KANJISJIS",
            "UNICODE_TO_LATIN",
            "UNICODE_TO_LOCALE",
            "UNICODE_TO_UNICODE_FoldSpace",
            "UNICODE_TO_UNICODE_Fullwidth",
            "UNICODE_TO_UNICODE_Halfwidth",
            "UNICODE_TO_UNICODE_NFC",
            "UNICODE_TO_UNICODE_NFD",
            "UNICODE_TO_UNICODE_NFKC",
            "UNICODE_TO_UNICODE_NFKD",
        }

        # In Teradata, REPLACE starts a DDL statement (e.g. REPLACE VIEW),
        # so it must not be treated as a function token ...
        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
        FUNC_TOKENS.remove(TokenType.REPLACE)

        # ... and instead is parsed like CREATE.
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.REPLACE: lambda self: self._parse_create(),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "RANGE_N": lambda self: self._parse_rangen(),
            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
        }

        def _parse_translate(self, strict: bool) -> exp.Expression:
            """Parse TRANSLATE(expr USING <translator>) into a cast to the
            target character set.

            Args:
                strict: When true, produce ``exp.Cast`` (error on failure);
                    otherwise ``exp.TryCast``.

            Raises:
                Via ``raise_error`` when USING or a known translator is missing.
            """
            this = self._parse_conjunction()

            if not self._match(TokenType.USING):
                self.raise_error("Expected USING in TRANSLATE")

            if self._match_texts(self.CHARSET_TRANSLATORS):
                # The part after "_TO_" is the target character set.
                charset_split = self._prev.text.split("_TO_")
                to = self.expression(exp.CharacterSet, this=charset_split[1])
            else:
                self.raise_error("Expected a character set translator after USING in TRANSLATE")

            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)

        # FROM before SET in Teradata UPDATE syntax
        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
        def _parse_update(self) -> exp.Update:
            """Parse UPDATE with Teradata's clause order: target table, then
            optional FROM, then SET assignments, then WHERE."""
            return self.expression(
                exp.Update,
                **{  # type: ignore
                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
                    "from": self._parse_from(modifiers=True),
                    "expressions": self._match(TokenType.SET)
                    and self._parse_csv(self._parse_equality),
                    "where": self._parse_where(),
                },
            )

        def _parse_rangen(self) -> exp.RangeN:
            """Parse RANGE_N(col BETWEEN expr, ... [EACH expr])."""
            this = self._parse_id_var()
            self._match(TokenType.BETWEEN)

            expressions = self._parse_csv(self._parse_conjunction)
            # EACH (range step) is optional; `each` stays falsy when absent.
            each = self._match_text_seq("EACH") and self._parse_conjunction()

            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)

    class Generator(generator.Generator):
        # Teradata SQL output carries no join or table hints.
        JOIN_HINTS = False
        TABLE_HINTS = False

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
        }

        # Where each property is rendered within a CREATE statement.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
        }

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            # Teradata casts strings to dates with an inline FORMAT clause.
            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
        }

        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
            """Render PARTITION BY (Teradata uses no "ED" suffix)."""
            return f"PARTITION BY {self.sql(expression, 'this')}"

        # FROM before SET in Teradata UPDATE syntax
        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
        def update_sql(self, expression: exp.Update) -> str:
            """Render UPDATE with the FROM clause before SET."""
            this = self.sql(expression, "this")
            from_sql = self.sql(expression, "from")
            set_sql = self.expressions(expression, flat=True)
            where_sql = self.sql(expression, "where")
            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
            return self.prepend_ctes(expression, sql)

        def mod_sql(self, expression: exp.Mod) -> str:
            """Render modulo as the MOD operator (Teradata has no %)."""
            return self.binary(expression, "MOD")

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, qualifying UDTs with SYSUDTLIB when the
            expression carries a `prefix` arg."""
            type_sql = super().datatype_sql(expression)
            prefix_sql = expression.args.get("prefix")
            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql

        def rangen_sql(self, expression: exp.RangeN) -> str:
            """Render RANGE_N(col BETWEEN exprs [EACH step])."""
            this = self.sql(expression, "this")
            expressions_sql = self.expressions(expression)
            each_sql = self.sql(expression, "each")
            each_sql = f" EACH {each_sql}" if each_sql else ""

            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"

        def createable_sql(
            self,
            expression: exp.Create,
            locations: dict[exp.Properties.Location, list[exp.Property]],
        ) -> str:
            """Render the createable part of CREATE; for tables with
            POST_NAME properties, place those properties between the table
            name and the column schema (Teradata table options position)."""
            kind = self.sql(expression, "kind").upper()
            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
                this_name = self.sql(expression.this, "this")
                this_properties = self.properties(
                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
                    wrapped=False,
                    prefix=",",
                )
                this_schema = self.schema_columns_sql(expression.this)
                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
            return super().createable_sql(expression, locations)
class Teradata(sqlglot.dialects.dialect.Dialect):
  9class Teradata(Dialect):
 10    TIME_MAPPING = {
 11        "Y": "%Y",
 12        "YYYY": "%Y",
 13        "YY": "%y",
 14        "MMMM": "%B",
 15        "MMM": "%b",
 16        "DD": "%d",
 17        "D": "%-d",
 18        "HH": "%H",
 19        "H": "%-H",
 20        "MM": "%M",
 21        "M": "%-M",
 22        "SS": "%S",
 23        "S": "%-S",
 24        "SSSSSS": "%f",
 25        "E": "%a",
 26        "EE": "%a",
 27        "EEE": "%a",
 28        "EEEE": "%A",
 29    }
 30
 31    class Tokenizer(tokens.Tokenizer):
 32        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
 33        KEYWORDS = {
 34            **tokens.Tokenizer.KEYWORDS,
 35            "BYTEINT": TokenType.SMALLINT,
 36            "SEL": TokenType.SELECT,
 37            "INS": TokenType.INSERT,
 38            "MOD": TokenType.MOD,
 39            "LT": TokenType.LT,
 40            "LE": TokenType.LTE,
 41            "GT": TokenType.GT,
 42            "GE": TokenType.GTE,
 43            "^=": TokenType.NEQ,
 44            "NE": TokenType.NEQ,
 45            "NOT=": TokenType.NEQ,
 46            "ST_GEOMETRY": TokenType.GEOMETRY,
 47        }
 48
 49        # Teradata does not support % as a modulo operator
 50        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
 51        SINGLE_TOKENS.pop("%")
 52
 53    class Parser(parser.Parser):
 54        CHARSET_TRANSLATORS = {
 55            "GRAPHIC_TO_KANJISJIS",
 56            "GRAPHIC_TO_LATIN",
 57            "GRAPHIC_TO_UNICODE",
 58            "GRAPHIC_TO_UNICODE_PadSpace",
 59            "KANJI1_KanjiEBCDIC_TO_UNICODE",
 60            "KANJI1_KanjiEUC_TO_UNICODE",
 61            "KANJI1_KANJISJIS_TO_UNICODE",
 62            "KANJI1_SBC_TO_UNICODE",
 63            "KANJISJIS_TO_GRAPHIC",
 64            "KANJISJIS_TO_LATIN",
 65            "KANJISJIS_TO_UNICODE",
 66            "LATIN_TO_GRAPHIC",
 67            "LATIN_TO_KANJISJIS",
 68            "LATIN_TO_UNICODE",
 69            "LOCALE_TO_UNICODE",
 70            "UNICODE_TO_GRAPHIC",
 71            "UNICODE_TO_GRAPHIC_PadGraphic",
 72            "UNICODE_TO_GRAPHIC_VarGraphic",
 73            "UNICODE_TO_KANJI1_KanjiEBCDIC",
 74            "UNICODE_TO_KANJI1_KanjiEUC",
 75            "UNICODE_TO_KANJI1_KANJISJIS",
 76            "UNICODE_TO_KANJI1_SBC",
 77            "UNICODE_TO_KANJISJIS",
 78            "UNICODE_TO_LATIN",
 79            "UNICODE_TO_LOCALE",
 80            "UNICODE_TO_UNICODE_FoldSpace",
 81            "UNICODE_TO_UNICODE_Fullwidth",
 82            "UNICODE_TO_UNICODE_Halfwidth",
 83            "UNICODE_TO_UNICODE_NFC",
 84            "UNICODE_TO_UNICODE_NFD",
 85            "UNICODE_TO_UNICODE_NFKC",
 86            "UNICODE_TO_UNICODE_NFKD",
 87        }
 88
 89        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
 90        FUNC_TOKENS.remove(TokenType.REPLACE)
 91
 92        STATEMENT_PARSERS = {
 93            **parser.Parser.STATEMENT_PARSERS,
 94            TokenType.REPLACE: lambda self: self._parse_create(),
 95        }
 96
 97        FUNCTION_PARSERS = {
 98            **parser.Parser.FUNCTION_PARSERS,
 99            "RANGE_N": lambda self: self._parse_rangen(),
100            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
101        }
102
103        def _parse_translate(self, strict: bool) -> exp.Expression:
104            this = self._parse_conjunction()
105
106            if not self._match(TokenType.USING):
107                self.raise_error("Expected USING in TRANSLATE")
108
109            if self._match_texts(self.CHARSET_TRANSLATORS):
110                charset_split = self._prev.text.split("_TO_")
111                to = self.expression(exp.CharacterSet, this=charset_split[1])
112            else:
113                self.raise_error("Expected a character set translator after USING in TRANSLATE")
114
115            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
116
117        # FROM before SET in Teradata UPDATE syntax
118        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
119        def _parse_update(self) -> exp.Update:
120            return self.expression(
121                exp.Update,
122                **{  # type: ignore
123                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
124                    "from": self._parse_from(modifiers=True),
125                    "expressions": self._match(TokenType.SET)
126                    and self._parse_csv(self._parse_equality),
127                    "where": self._parse_where(),
128                },
129            )
130
131        def _parse_rangen(self):
132            this = self._parse_id_var()
133            self._match(TokenType.BETWEEN)
134
135            expressions = self._parse_csv(self._parse_conjunction)
136            each = self._match_text_seq("EACH") and self._parse_conjunction()
137
138            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)
139
140    class Generator(generator.Generator):
141        JOIN_HINTS = False
142        TABLE_HINTS = False
143
144        TYPE_MAPPING = {
145            **generator.Generator.TYPE_MAPPING,
146            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
147        }
148
149        PROPERTIES_LOCATION = {
150            **generator.Generator.PROPERTIES_LOCATION,
151            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
152            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
153            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
154        }
155
156        TRANSFORMS = {
157            **generator.Generator.TRANSFORMS,
158            exp.Max: max_or_greatest,
159            exp.Min: min_or_least,
160            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
161            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
162            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
163        }
164
165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
166            return f"PARTITION BY {self.sql(expression, 'this')}"
167
168        # FROM before SET in Teradata UPDATE syntax
169        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
170        def update_sql(self, expression: exp.Update) -> str:
171            this = self.sql(expression, "this")
172            from_sql = self.sql(expression, "from")
173            set_sql = self.expressions(expression, flat=True)
174            where_sql = self.sql(expression, "where")
175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
176            return self.prepend_ctes(expression, sql)
177
178        def mod_sql(self, expression: exp.Mod) -> str:
179            return self.binary(expression, "MOD")
180
181        def datatype_sql(self, expression: exp.DataType) -> str:
182            type_sql = super().datatype_sql(expression)
183            prefix_sql = expression.args.get("prefix")
184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
185
186        def rangen_sql(self, expression: exp.RangeN) -> str:
187            this = self.sql(expression, "this")
188            expressions_sql = self.expressions(expression)
189            each_sql = self.sql(expression, "each")
190            each_sql = f" EACH {each_sql}" if each_sql else ""
191
192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
193
194        def createable_sql(
195            self,
196            expression: exp.Create,
197            locations: dict[exp.Properties.Location, list[exp.Property]],
198        ) -> str:
199            kind = self.sql(expression, "kind").upper()
200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
201                this_name = self.sql(expression.this, "this")
202                this_properties = self.properties(
203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
204                    wrapped=False,
205                    prefix=",",
206                )
207                this_schema = self.schema_columns_sql(expression.this)
208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
209            return super().createable_sql(expression, locations)
class Teradata.Tokenizer(sqlglot.tokens.Tokenizer):
31    class Tokenizer(tokens.Tokenizer):
32        # https://docs.teradata.com/r/Teradata-Database-SQL-Functions-Operators-Expressions-and-Predicates/March-2017/Comparison-Operators-and-Functions/Comparison-Operators/ANSI-Compliance
33        KEYWORDS = {
34            **tokens.Tokenizer.KEYWORDS,
35            "BYTEINT": TokenType.SMALLINT,
36            "SEL": TokenType.SELECT,
37            "INS": TokenType.INSERT,
38            "MOD": TokenType.MOD,
39            "LT": TokenType.LT,
40            "LE": TokenType.LTE,
41            "GT": TokenType.GT,
42            "GE": TokenType.GTE,
43            "^=": TokenType.NEQ,
44            "NE": TokenType.NEQ,
45            "NOT=": TokenType.NEQ,
46            "ST_GEOMETRY": TokenType.GEOMETRY,
47        }
48
49        # Teradata does not support % as a modulo operator
50        SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS}
51        SINGLE_TOKENS.pop("%")
class Teradata.Parser(sqlglot.parser.Parser):
 53    class Parser(parser.Parser):
 54        CHARSET_TRANSLATORS = {
 55            "GRAPHIC_TO_KANJISJIS",
 56            "GRAPHIC_TO_LATIN",
 57            "GRAPHIC_TO_UNICODE",
 58            "GRAPHIC_TO_UNICODE_PadSpace",
 59            "KANJI1_KanjiEBCDIC_TO_UNICODE",
 60            "KANJI1_KanjiEUC_TO_UNICODE",
 61            "KANJI1_KANJISJIS_TO_UNICODE",
 62            "KANJI1_SBC_TO_UNICODE",
 63            "KANJISJIS_TO_GRAPHIC",
 64            "KANJISJIS_TO_LATIN",
 65            "KANJISJIS_TO_UNICODE",
 66            "LATIN_TO_GRAPHIC",
 67            "LATIN_TO_KANJISJIS",
 68            "LATIN_TO_UNICODE",
 69            "LOCALE_TO_UNICODE",
 70            "UNICODE_TO_GRAPHIC",
 71            "UNICODE_TO_GRAPHIC_PadGraphic",
 72            "UNICODE_TO_GRAPHIC_VarGraphic",
 73            "UNICODE_TO_KANJI1_KanjiEBCDIC",
 74            "UNICODE_TO_KANJI1_KanjiEUC",
 75            "UNICODE_TO_KANJI1_KANJISJIS",
 76            "UNICODE_TO_KANJI1_SBC",
 77            "UNICODE_TO_KANJISJIS",
 78            "UNICODE_TO_LATIN",
 79            "UNICODE_TO_LOCALE",
 80            "UNICODE_TO_UNICODE_FoldSpace",
 81            "UNICODE_TO_UNICODE_Fullwidth",
 82            "UNICODE_TO_UNICODE_Halfwidth",
 83            "UNICODE_TO_UNICODE_NFC",
 84            "UNICODE_TO_UNICODE_NFD",
 85            "UNICODE_TO_UNICODE_NFKC",
 86            "UNICODE_TO_UNICODE_NFKD",
 87        }
 88
 89        FUNC_TOKENS = {*parser.Parser.FUNC_TOKENS}
 90        FUNC_TOKENS.remove(TokenType.REPLACE)
 91
 92        STATEMENT_PARSERS = {
 93            **parser.Parser.STATEMENT_PARSERS,
 94            TokenType.REPLACE: lambda self: self._parse_create(),
 95        }
 96
 97        FUNCTION_PARSERS = {
 98            **parser.Parser.FUNCTION_PARSERS,
 99            "RANGE_N": lambda self: self._parse_rangen(),
100            "TRANSLATE": lambda self: self._parse_translate(self.STRICT_CAST),
101        }
102
103        def _parse_translate(self, strict: bool) -> exp.Expression:
104            this = self._parse_conjunction()
105
106            if not self._match(TokenType.USING):
107                self.raise_error("Expected USING in TRANSLATE")
108
109            if self._match_texts(self.CHARSET_TRANSLATORS):
110                charset_split = self._prev.text.split("_TO_")
111                to = self.expression(exp.CharacterSet, this=charset_split[1])
112            else:
113                self.raise_error("Expected a character set translator after USING in TRANSLATE")
114
115            return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to)
116
117        # FROM before SET in Teradata UPDATE syntax
118        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
119        def _parse_update(self) -> exp.Update:
120            return self.expression(
121                exp.Update,
122                **{  # type: ignore
123                    "this": self._parse_table(alias_tokens=self.UPDATE_ALIAS_TOKENS),
124                    "from": self._parse_from(modifiers=True),
125                    "expressions": self._match(TokenType.SET)
126                    and self._parse_csv(self._parse_equality),
127                    "where": self._parse_where(),
128                },
129            )
130
131        def _parse_rangen(self):
132            this = self._parse_id_var()
133            self._match(TokenType.BETWEEN)
134
135            expressions = self._parse_csv(self._parse_conjunction)
136            each = self._match_text_seq("EACH") and self._parse_conjunction()
137
138            return self.expression(exp.RangeN, this=this, expressions=expressions, each=each)

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
class Teradata.Generator(sqlglot.generator.Generator):
140    class Generator(generator.Generator):
141        JOIN_HINTS = False
142        TABLE_HINTS = False
143
144        TYPE_MAPPING = {
145            **generator.Generator.TYPE_MAPPING,
146            exp.DataType.Type.GEOMETRY: "ST_GEOMETRY",
147        }
148
149        PROPERTIES_LOCATION = {
150            **generator.Generator.PROPERTIES_LOCATION,
151            exp.OnCommitProperty: exp.Properties.Location.POST_INDEX,
152            exp.PartitionedByProperty: exp.Properties.Location.POST_EXPRESSION,
153            exp.StabilityProperty: exp.Properties.Location.POST_CREATE,
154        }
155
156        TRANSFORMS = {
157            **generator.Generator.TRANSFORMS,
158            exp.Max: max_or_greatest,
159            exp.Min: min_or_least,
160            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
161            exp.StrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
162            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
163        }
164
165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
166            return f"PARTITION BY {self.sql(expression, 'this')}"
167
168        # FROM before SET in Teradata UPDATE syntax
169        # https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-VantageTM-SQL-Data-Manipulation-Language-17.20/Statement-Syntax/UPDATE/UPDATE-Syntax-Basic-Form-FROM-Clause
170        def update_sql(self, expression: exp.Update) -> str:
171            this = self.sql(expression, "this")
172            from_sql = self.sql(expression, "from")
173            set_sql = self.expressions(expression, flat=True)
174            where_sql = self.sql(expression, "where")
175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
176            return self.prepend_ctes(expression, sql)
177
178        def mod_sql(self, expression: exp.Mod) -> str:
179            return self.binary(expression, "MOD")
180
181        def datatype_sql(self, expression: exp.DataType) -> str:
182            type_sql = super().datatype_sql(expression)
183            prefix_sql = expression.args.get("prefix")
184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
185
186        def rangen_sql(self, expression: exp.RangeN) -> str:
187            this = self.sql(expression, "this")
188            expressions_sql = self.expressions(expression)
189            each_sql = self.sql(expression, "each")
190            each_sql = f" EACH {each_sql}" if each_sql else ""
191
192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
193
194        def createable_sql(
195            self,
196            expression: exp.Create,
197            locations: dict[exp.Properties.Location, list[exp.Property]],
198        ) -> str:
199            kind = self.sql(expression, "kind").upper()
200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
201                this_name = self.sql(expression.this, "this")
202                this_properties = self.properties(
203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
204                    wrapped=False,
205                    prefix=",",
206                )
207                this_schema = self.schema_columns_sql(expression.this)
208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
209            return super().createable_sql(expression, locations)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
def partitionedbyproperty_sql(self, expression: sqlglot.expressions.PartitionedByProperty) -> str:
165        def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
166            return f"PARTITION BY {self.sql(expression, 'this')}"
def update_sql(self, expression: sqlglot.expressions.Update) -> str:
170        def update_sql(self, expression: exp.Update) -> str:
171            this = self.sql(expression, "this")
172            from_sql = self.sql(expression, "from")
173            set_sql = self.expressions(expression, flat=True)
174            where_sql = self.sql(expression, "where")
175            sql = f"UPDATE {this}{from_sql} SET {set_sql}{where_sql}"
176            return self.prepend_ctes(expression, sql)
def mod_sql(self, expression: sqlglot.expressions.Mod) -> str:
178        def mod_sql(self, expression: exp.Mod) -> str:
179            return self.binary(expression, "MOD")
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
181        def datatype_sql(self, expression: exp.DataType) -> str:
182            type_sql = super().datatype_sql(expression)
183            prefix_sql = expression.args.get("prefix")
184            return f"SYSUDTLIB.{type_sql}" if prefix_sql else type_sql
def rangen_sql(self, expression: sqlglot.expressions.RangeN) -> str:
186        def rangen_sql(self, expression: exp.RangeN) -> str:
187            this = self.sql(expression, "this")
188            expressions_sql = self.expressions(expression)
189            each_sql = self.sql(expression, "each")
190            each_sql = f" EACH {each_sql}" if each_sql else ""
191
192            return f"RANGE_N({this} BETWEEN {expressions_sql}{each_sql})"
def createable_sql( self, expression: sqlglot.expressions.Create, locations: dict[sqlglot.expressions.Properties.Location, list[sqlglot.expressions.Property]]) -> str:
194        def createable_sql(
195            self,
196            expression: exp.Create,
197            locations: dict[exp.Properties.Location, list[exp.Property]],
198        ) -> str:
199            kind = self.sql(expression, "kind").upper()
200            if kind == "TABLE" and locations.get(exp.Properties.Location.POST_NAME):
201                this_name = self.sql(expression.this, "this")
202                this_properties = self.properties(
203                    exp.Properties(expressions=locations[exp.Properties.Location.POST_NAME]),
204                    wrapped=False,
205                    prefix=",",
206                )
207                this_schema = self.schema_columns_sql(expression.this)
208                return f"{this_name}{this_properties}{self.sep()}{this_schema}"
209            return super().createable_sql(expression, locations)
@classmethod
def can_identify(cls, text: str, identify: str | bool = 'safe') -> bool:
247    @classmethod
248    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
249        """Checks if text can be identified given an identify option.
250
251        Args:
252            text: The text to check.
253            identify:
254                "always" or `True`: Always returns true.
255                "safe": True if the identifier is case-insensitive.
256
257        Returns:
258            Whether or not the given text can be identified.
259        """
260        if identify is True or identify == "always":
261            return True
262
263        if identify == "safe":
264            return not cls.case_sensitive(text)
265
266        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:

Whether or not the given text can be identified.

Inherited Members
sqlglot.generator.Generator
Generator
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypesize_sql
directory_sql
delete_sql
drop_sql
except_sql
except_op
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
intersect_op
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql