sqlglot.dialects.oracle

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
  7from sqlglot.helper import seq_get
  8from sqlglot.tokens import TokenType
  9
 10
 11def _parse_xml_table(self: parser.Parser) -> exp.XMLTable:
 12    this = self._parse_string()
 13
 14    passing = None
 15    columns = None
 16
 17    if self._match_text_seq("PASSING"):
 18        # The BY VALUE keywords are optional and are provided for semantic clarity
 19        self._match_text_seq("BY", "VALUE")
 20        passing = self._parse_csv(self._parse_column)
 21
 22    by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
 23
 24    if self._match_text_seq("COLUMNS"):
 25        columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
 26
 27    return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
 28
 29
 30class Oracle(Dialect):
 31    ALIAS_POST_TABLESAMPLE = True
 32
 33    # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
 34    # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
 35    TIME_MAPPING = {
 36        "AM": "%p",  # Meridian indicator with or without periods
 37        "A.M.": "%p",  # Meridian indicator with or without periods
 38        "PM": "%p",  # Meridian indicator with or without periods
 39        "P.M.": "%p",  # Meridian indicator with or without periods
 40        "D": "%u",  # Day of week (1-7)
 41        "DAY": "%A",  # name of day
 42        "DD": "%d",  # day of month (1-31)
 43        "DDD": "%j",  # day of year (1-366)
 44        "DY": "%a",  # abbreviated name of day
 45        "HH": "%I",  # Hour of day (1-12)
 46        "HH12": "%I",  # alias for HH
 47        "HH24": "%H",  # Hour of day (0-23)
 48        "IW": "%V",  # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
 49        "MI": "%M",  # Minute (0-59)
 50        "MM": "%m",  # Month (01-12; January = 01)
 51        "MON": "%b",  # Abbreviated name of month
 52        "MONTH": "%B",  # Name of month
 53        "SS": "%S",  # Second (0-59)
 54        "WW": "%W",  # Week of year (1-53)
 55        "YY": "%y",  # 15
 56        "YYYY": "%Y",  # 2015
 57    }
 58
 59    class Parser(parser.Parser):
 60        WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
 61
 62        FUNCTIONS = {
 63            **parser.Parser.FUNCTIONS,
 64            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 65        }
 66
 67        FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
 68            **parser.Parser.FUNCTION_PARSERS,
 69            "XMLTABLE": _parse_xml_table,
 70        }
 71
 72        TYPE_LITERAL_PARSERS = {
 73            exp.DataType.Type.DATE: lambda self, this, _: self.expression(
 74                exp.DateStrToDate, this=this
 75            )
 76        }
 77
 78        def _parse_column(self) -> t.Optional[exp.Expression]:
 79            column = super()._parse_column()
 80            if column:
 81                column.set("join_mark", self._match(TokenType.JOIN_MARKER))
 82            return column
 83
 84        def _parse_hint(self) -> t.Optional[exp.Hint]:
 85            if self._match(TokenType.HINT):
 86                start = self._curr
 87                while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
 88                    self._advance()
 89
 90                if not self._curr:
 91                    self.raise_error("Expected */ after HINT")
 92
 93                end = self._tokens[self._index - 3]
 94                return exp.Hint(expressions=[self._find_sql(start, end)])
 95
 96            return None
 97
 98    class Generator(generator.Generator):
 99        LOCKING_READS_SUPPORTED = True
100        JOIN_HINTS = False
101        TABLE_HINTS = False
102
103        TYPE_MAPPING = {
104            **generator.Generator.TYPE_MAPPING,
105            exp.DataType.Type.TINYINT: "NUMBER",
106            exp.DataType.Type.SMALLINT: "NUMBER",
107            exp.DataType.Type.INT: "NUMBER",
108            exp.DataType.Type.BIGINT: "NUMBER",
109            exp.DataType.Type.DECIMAL: "NUMBER",
110            exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
111            exp.DataType.Type.VARCHAR: "VARCHAR2",
112            exp.DataType.Type.NVARCHAR: "NVARCHAR2",
113            exp.DataType.Type.TEXT: "CLOB",
114            exp.DataType.Type.BINARY: "BLOB",
115            exp.DataType.Type.VARBINARY: "BLOB",
116        }
117
118        TRANSFORMS = {
119            **generator.Generator.TRANSFORMS,
120            exp.DateStrToDate: lambda self, e: self.func(
121                "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
122            ),
123            exp.Group: transforms.preprocess([transforms.unalias_group]),
124            exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
125            exp.ILike: no_ilike_sql,
126            exp.Coalesce: rename_func("NVL"),
127            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
128            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
129            exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
130            exp.Substring: rename_func("SUBSTR"),
131            exp.Table: lambda self, e: self.table_sql(e, sep=" "),
132            exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
133            exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
134            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
135            exp.Trim: trim_sql,
136            exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
137        }
138
139        PROPERTIES_LOCATION = {
140            **generator.Generator.PROPERTIES_LOCATION,
141            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
142        }
143
144        LIMIT_FETCH = "FETCH"
145
146        def offset_sql(self, expression: exp.Offset) -> str:
147            return f"{super().offset_sql(expression)} ROWS"
148
149        def column_sql(self, expression: exp.Column) -> str:
150            column = super().column_sql(expression)
151            return f"{column} (+)" if expression.args.get("join_mark") else column
152
153        def xmltable_sql(self, expression: exp.XMLTable) -> str:
154            this = self.sql(expression, "this")
155            passing = self.expressions(expression, key="passing")
156            passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
157            columns = self.expressions(expression, key="columns")
158            columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
159            by_ref = (
160                f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
161            )
162            return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
163
164    class Tokenizer(tokens.Tokenizer):
165        VAR_SINGLE_TOKENS = {"@"}
166
167        KEYWORDS = {
168            **tokens.Tokenizer.KEYWORDS,
169            "(+)": TokenType.JOIN_MARKER,
170            "BINARY_DOUBLE": TokenType.DOUBLE,
171            "BINARY_FLOAT": TokenType.FLOAT,
172            "COLUMNS": TokenType.COLUMN,
173            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
174            "MINUS": TokenType.EXCEPT,
175            "NVARCHAR2": TokenType.NVARCHAR,
176            "SAMPLE": TokenType.TABLE_SAMPLE,
177            "START": TokenType.BEGIN,
178            "TOP": TokenType.TOP,
179            "VARCHAR2": TokenType.VARCHAR,
180        }
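
In practice the dialect is exercised through sqlglot's top-level helpers rather than instantiated directly. A minimal sketch of round-tripping Oracle SQL (the sample queries and the DuckDB counterpart are illustrative choices, not part of this module):

    import sqlglot

    # Oracle -> another dialect; DuckDB is just an arbitrary target here.
    print(sqlglot.transpile("SELECT NVL(a, b) FROM t", read="oracle", write="duckdb")[0])

    # Another dialect -> Oracle; per the TRANSFORMS mapping above, exp.Coalesce is rendered as NVL.
    print(sqlglot.transpile("SELECT COALESCE(a, b) FROM t", read="duckdb", write="oracle")[0])
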
class Oracle(sqlglot.dialects.dialect.Dialect):
class Oracle.Parser(sqlglot.parser.Parser):

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
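
A short sketch of driving this parser, using the arguments documented above (the query text is illustrative):

    import sqlglot
    from sqlglot.dialects.oracle import Oracle
    from sqlglot.errors import ErrorLevel

    # The usual entry point: parse_one dispatches to Oracle.Parser when read="oracle".
    # The (+) outer-join marker is attached to the column via _parse_column above.
    ast = sqlglot.parse_one(
        "SELECT e.name FROM emp e, dept d WHERE e.dept_id (+) = d.id", read="oracle"
    )
    print(ast.sql(dialect="oracle"))

    # Constructing the parser directly exposes error_level, error_message_context and max_errors.
    tokens = Oracle.Tokenizer().tokenize("SELECT 1 FROM dual")
    parsed = Oracle.Parser(error_level=ErrorLevel.RAISE, max_errors=1).parse(tokens)
    print(parsed[0].sql(dialect="oracle"))
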
class Oracle.Generator(sqlglot.generator.Generator):

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
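
A sketch of producing Oracle SQL with some of these options (the input query is illustrative):

    import sqlglot

    ast = sqlglot.parse_one("SELECT col FROM tbl WHERE col > 1 LIMIT 5")

    # Because LIMIT_FETCH = "FETCH" above, the LIMIT clause is emitted using Oracle's
    # FETCH FIRST ... ROWS ONLY form; pretty and identify are the arguments documented here.
    print(ast.sql(dialect="oracle", pretty=True))
    print(ast.sql(dialect="oracle", identify=True))
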
def offset_sql(self, expression: sqlglot.expressions.Offset) -> str:
def column_sql(self, expression: sqlglot.expressions.Column) -> str:
def xmltable_sql(self, expression: sqlglot.expressions.XMLTable) -> str:
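
A minimal round-trip sketch for the XMLTABLE support above (the XPath string and column names are illustrative):

    import sqlglot

    ast = sqlglot.parse_one(
        "XMLTABLE('/Warehouse' PASSING warehouse_spec COLUMNS water VARCHAR2(6))",
        read="oracle",
    )
    # xmltable_sql places each clause on its own indented line when pretty=True.
    print(ast.sql(dialect="oracle", pretty=True))
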
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
247    @classmethod
248    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
249        """Checks if text can be identified given an identify option.
250
251        Args:
252            text: The text to check.
253            identify:
254                "always" or `True`: Always returns true.
255                "safe": True if the identifier is case-insensitive.
256
257        Returns:
258            Whether or not the given text can be identified.
259        """
260        if identify is True or identify == "always":
261            return True
262
263        if identify == "safe":
264            return not cls.case_sensitive(text)
265
266        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns: Whether or not the given text can be identified.
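
A quick sketch of calling it on this dialect's generator; the result for the "safe" mode depends on case_sensitive and is not asserted here:

    from sqlglot.dialects.oracle import Oracle

    # From the source above: True/"always" is unconditionally identifiable,
    # and anything other than True/"always"/"safe" falls through to False.
    print(Oracle.Generator.can_identify("dual", identify="always"))  # True
    print(Oracle.Generator.can_identify("dual", identify=False))     # False
    print(Oracle.Generator.can_identify("dual", identify="safe"))    # depends on case sensitivity
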

class Oracle.Tokenizer(sqlglot.tokens.Tokenizer):
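
The keyword remappings above (MINUS to EXCEPT, VARCHAR2 to VARCHAR, and so on) surface directly when transpiling; a small sketch with illustrative queries:

    import sqlglot

    # MINUS is tokenized as EXCEPT, so non-Oracle targets receive standard set-operation syntax.
    print(sqlglot.transpile("SELECT a FROM t MINUS SELECT a FROM u", read="oracle", write="postgres")[0])

    # VARCHAR2 is tokenized as VARCHAR and mapped back to VARCHAR2 by the generator's TYPE_MAPPING.
    print(sqlglot.transpile("CAST(x AS VARCHAR2(10))", read="oracle", write="oracle")[0])
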