sqlglot.executor.python
import ast
import collections
import itertools
import math

from sqlglot import exp, generator, planner, tokens
from sqlglot.dialects.dialect import Dialect, inline_array_sql
from sqlglot.errors import ExecuteError
from sqlglot.executor.context import Context
from sqlglot.executor.env import ENV
from sqlglot.executor.table import RowReader, Table
from sqlglot.helper import csv_reader, subclasses


class PythonExecutor:
    def __init__(self, env=None, tables=None):
        self.generator = Python().generator(identify=True, comments=False)
        self.env = {**ENV, **(env or {})}
        self.tables = tables or {}

    def execute(self, plan):
        running = set()
        finished = set()
        queue = set(plan.leaves)
        contexts = {}

        while queue:
            node = queue.pop()
            try:
                context = self.context(
                    {
                        name: table
                        for dep in node.dependencies
                        for name, table in contexts[dep].tables.items()
                    }
                )
                running.add(node)

                if isinstance(node, planner.Scan):
                    contexts[node] = self.scan(node, context)
                elif isinstance(node, planner.Aggregate):
                    contexts[node] = self.aggregate(node, context)
                elif isinstance(node, planner.Join):
                    contexts[node] = self.join(node, context)
                elif isinstance(node, planner.Sort):
                    contexts[node] = self.sort(node, context)
                elif isinstance(node, planner.SetOperation):
                    contexts[node] = self.set_operation(node, context)
                else:
                    raise NotImplementedError

                running.remove(node)
                finished.add(node)

                for dep in node.dependents:
                    if dep not in running and all(d in contexts for d in dep.dependencies):
                        queue.add(dep)

                for dep in node.dependencies:
                    if all(d in finished for d in dep.dependents):
                        contexts.pop(dep)
            except Exception as e:
                raise ExecuteError(f"Step '{node.id}' failed: {e}") from e

        root = plan.root
        return contexts[root].tables[root.name]

    def generate(self, expression):
        """Convert a SQL expression into literal Python code and compile it into bytecode."""
        if not expression:
            return None

        sql = self.generator.generate(expression)
        return compile(sql, sql, "eval", optimize=2)

    def generate_tuple(self, expressions):
        """Convert an array of SQL expressions into tuple of Python byte code."""
        if not expressions:
            return tuple()
        return tuple(self.generate(expression) for expression in expressions)

    def context(self, tables):
        return Context(tables, env=self.env)

    def table(self, expressions):
        return Table(
            expression.alias_or_name if isinstance(expression, exp.Expression) else expression
            for expression in expressions
        )

    def scan(self, step, context):
        source = step.source

        if source and isinstance(source, exp.Expression):
            source = source.name or source.alias

        if source is None:
            context, table_iter = self.static()
        elif source in context:
            if not step.projections and not step.condition:
                return self.context({step.name: context.tables[source]})
            table_iter = context.table_iter(source)
        elif isinstance(step.source, exp.Table) and isinstance(step.source.this, exp.ReadCSV):
            table_iter = self.scan_csv(step)
            context = next(table_iter)
        else:
            context, table_iter = self.scan_table(step)

        return self.context({step.name: self._project_and_filter(context, step, table_iter)})

    def _project_and_filter(self, context, step, table_iter):
        sink = self.table(step.projections if step.projections else context.columns)
        condition = self.generate(step.condition)
        projections = self.generate_tuple(step.projections)

        for reader in table_iter:
            if len(sink) >= step.limit:
                break

            if condition and not context.eval(condition):
                continue

            if projections:
                sink.append(context.eval_tuple(projections))
            else:
                sink.append(reader.row)

        return sink

    def static(self):
        return self.context({}), [RowReader(())]

    def scan_table(self, step):
        table = self.tables.find(step.source)
        context = self.context({step.source.alias_or_name: table})
        return context, iter(table)

    def scan_csv(self, step):
        alias = step.source.alias
        source = step.source.this

        with csv_reader(source) as reader:
            columns = next(reader)
            table = Table(columns)
            context = self.context({alias: table})
            yield context
            types = []

            for row in reader:
                if not types:
                    for v in row:
                        try:
                            types.append(type(ast.literal_eval(v)))
                        except (ValueError, SyntaxError):
                            types.append(str)
                context.set_row(tuple(t(v) for t, v in zip(types, row)))
                yield context.table.reader

    def join(self, step, context):
        source = step.name

        source_table = context.tables[source]
        source_context = self.context({source: source_table})
        column_ranges = {source: range(0, len(source_table.columns))}

        for name, join in step.joins.items():
            table = context.tables[name]
            start = max(r.stop for r in column_ranges.values())
            column_ranges[name] = range(start, len(table.columns) + start)
            join_context = self.context({name: table})

            if join.get("source_key"):
                table = self.hash_join(join, source_context, join_context)
            else:
                table = self.nested_loop_join(join, source_context, join_context)

            source_context = self.context(
                {
                    name: Table(table.columns, table.rows, column_range)
                    for name, column_range in column_ranges.items()
                }
            )
            condition = self.generate(join["condition"])
            if condition:
                source_context.filter(condition)

        if not step.condition and not step.projections:
            return source_context

        sink = self._project_and_filter(
            source_context,
            step,
            (reader for reader, _ in iter(source_context)),
        )

        if step.projections:
            return self.context({step.name: sink})
        else:
            return self.context(
                {
                    name: Table(table.columns, sink.rows, table.column_range)
                    for name, table in source_context.tables.items()
                }
            )

    def nested_loop_join(self, _join, source_context, join_context):
        table = Table(source_context.columns + join_context.columns)

        for reader_a, _ in source_context:
            for reader_b, _ in join_context:
                table.append(reader_a.row + reader_b.row)

        return table

    def hash_join(self, join, source_context, join_context):
        source_key = self.generate_tuple(join["source_key"])
        join_key = self.generate_tuple(join["join_key"])
        left = join.get("side") == "LEFT"
        right = join.get("side") == "RIGHT"

        results = collections.defaultdict(lambda: ([], []))

        for reader, ctx in source_context:
            results[ctx.eval_tuple(source_key)][0].append(reader.row)
        for reader, ctx in join_context:
            results[ctx.eval_tuple(join_key)][1].append(reader.row)

        table = Table(source_context.columns + join_context.columns)
        nulls = [(None,) * len(join_context.columns if left else source_context.columns)]

        for a_group, b_group in results.values():
            if left:
                b_group = b_group or nulls
            elif right:
                a_group = a_group or nulls

            for a_row, b_row in itertools.product(a_group, b_group):
                table.append(a_row + b_row)

        return table

    def aggregate(self, step, context):
        group_by = self.generate_tuple(step.group.values())
        aggregations = self.generate_tuple(step.aggregations)
        operands = self.generate_tuple(step.operands)

        if operands:
            operand_table = Table(self.table(step.operands).columns)

            for reader, ctx in context:
                operand_table.append(ctx.eval_tuple(operands))

            for i, (a, b) in enumerate(zip(context.table.rows, operand_table.rows)):
                context.table.rows[i] = a + b

            width = len(context.columns)
            context.add_columns(*operand_table.columns)

            operand_table = Table(
                context.columns,
                context.table.rows,
                range(width, width + len(operand_table.columns)),
            )

            context = self.context(
                {
                    None: operand_table,
                    **context.tables,
                }
            )

        context.sort(group_by)

        group = None
        start = 0
        end = 1
        length = len(context.table)
        table = self.table(list(step.group) + step.aggregations)
        condition = self.generate(step.condition)

        def add_row():
            if not condition or context.eval(condition):
                table.append(group + context.eval_tuple(aggregations))

        if length:
            for i in range(length):
                context.set_index(i)
                key = context.eval_tuple(group_by)
                group = key if group is None else group
                end += 1
                if key != group:
                    context.set_range(start, end - 2)
                    add_row()
                    group = key
                    start = end - 2
                if len(table.rows) >= step.limit:
                    break
                if i == length - 1:
                    context.set_range(start, end - 1)
                    add_row()
        elif step.limit > 0 and not group_by:
            context.set_range(0, 0)
            table.append(context.eval_tuple(aggregations))

        context = self.context({step.name: table, **{name: table for name in context.tables}})

        if step.projections:
            return self.scan(step, context)
        return context

    def sort(self, step, context):
        projections = self.generate_tuple(step.projections)
        projection_columns = [p.alias_or_name for p in step.projections]
        all_columns = list(context.columns) + projection_columns
        sink = self.table(all_columns)
        for reader, ctx in context:
            sink.append(reader.row + ctx.eval_tuple(projections))

        sort_ctx = self.context(
            {
                None: sink,
                **{table: sink for table in context.tables},
            }
        )
        sort_ctx.sort(self.generate_tuple(step.key))

        if not math.isinf(step.limit):
            sort_ctx.table.rows = sort_ctx.table.rows[0 : step.limit]

        output = Table(
            projection_columns,
            rows=[r[len(context.columns) : len(all_columns)] for r in sort_ctx.table.rows],
        )
        return self.context({step.name: output})

    def set_operation(self, step, context):
        left = context.tables[step.left]
        right = context.tables[step.right]

        sink = self.table(left.columns)

        if issubclass(step.op, exp.Intersect):
            sink.rows = list(set(left.rows).intersection(set(right.rows)))
        elif issubclass(step.op, exp.Except):
            sink.rows = list(set(left.rows).difference(set(right.rows)))
        elif issubclass(step.op, exp.Union) and step.distinct:
            sink.rows = list(set(left.rows).union(set(right.rows)))
        else:
            sink.rows = left.rows + right.rows

        return self.context({step.name: sink})


def _ordered_py(self, expression):
    this = self.sql(expression, "this")
    desc = "True" if expression.args.get("desc") else "False"
    nulls_first = "True" if expression.args.get("nulls_first") else "False"
    return f"ORDERED({this}, {desc}, {nulls_first})"


def _rename(self, e):
    try:
        values = list(e.args.values())

        if len(values) == 1:
            values = values[0]
            if not isinstance(values, list):
                return self.func(e.key, values)
            return self.func(e.key, *values)

        if isinstance(e, exp.Func) and e.is_var_len_args:
            *head, tail = values
            return self.func(e.key, *head, *tail)

        return self.func(e.key, *values)
    except Exception as ex:
        raise Exception(f"Could not rename {repr(e)}") from ex


def _case_sql(self, expression):
    this = self.sql(expression, "this")
    chain = self.sql(expression, "default") or "None"

    for e in reversed(expression.args["ifs"]):
        true = self.sql(e, "true")
        condition = self.sql(e, "this")
        condition = f"{this} = ({condition})" if this else condition
        chain = f"{true} if {condition} else ({chain})"

    return chain


def _lambda_sql(self, e: exp.Lambda) -> str:
    names = {e.name.lower() for e in e.expressions}

    e = e.transform(
        lambda n: exp.var(n.name)
        if isinstance(n, exp.Identifier) and n.name.lower() in names
        else n
    )

    return f"lambda {self.expressions(e, flat=True)}: {self.sql(e, 'this')}"


class Python(Dialect):
    class Tokenizer(tokens.Tokenizer):
        STRING_ESCAPES = ["\\"]

    class Generator(generator.Generator):
        TRANSFORMS = {
            **{klass: _rename for klass in subclasses(exp.__name__, exp.Binary)},
            **{klass: _rename for klass in exp.ALL_FUNCTIONS},
            exp.Case: _case_sql,
            exp.Alias: lambda self, e: self.sql(e.this),
            exp.Array: inline_array_sql,
            exp.And: lambda self, e: self.binary(e, "and"),
            exp.Between: _rename,
            exp.Boolean: lambda self, e: "True" if e.this else "False",
            exp.Cast: lambda self, e: f"CAST({self.sql(e.this)}, exp.DataType.Type.{e.args['to']})",
            exp.Column: lambda self, e: f"scope[{self.sql(e, 'table') or None}][{self.sql(e.this)}]",
            exp.Distinct: lambda self, e: f"set({self.sql(e, 'this')})",
            exp.Extract: lambda self, e: f"EXTRACT('{e.name.lower()}', {self.sql(e, 'expression')})",
            exp.In: lambda self, e: f"{self.sql(e, 'this')} in ({self.expressions(e, flat=True)})",
            exp.Interval: lambda self, e: f"INTERVAL({self.sql(e.this)}, '{self.sql(e.unit)}')",
            exp.Is: lambda self, e: self.binary(e, "is"),
            exp.Lambda: _lambda_sql,
            exp.Not: lambda self, e: f"not {self.sql(e.this)}",
            exp.Null: lambda *_: "None",
            exp.Or: lambda self, e: self.binary(e, "or"),
            exp.Ordered: _ordered_py,
            exp.Star: lambda *_: "1",
        }
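In practice PythonExecutor is driven by the higher-level sqlglot.executor.execute helper, which parses, optimizes, and plans a query and then runs the plan with this executor. A minimal sketch (the table name and rows below are illustrative):

from sqlglot.executor import execute

# Any mapping of table name -> list of row dicts works as in-memory data.
tables = {
    "sushi": [
        {"id": 1, "price": 1.0},
        {"id": 2, "price": 2.0},
        {"id": 3, "price": 3.0},
    ]
}

# execute() builds a plan and runs it with PythonExecutor under the hood.
result = execute("SELECT id, price FROM sushi WHERE price > 1.5", tables=tables)
print(result.columns)  # ("id", "price")
print(result.rows)     # [(2, 2.0), (3, 3.0)]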
class PythonExecutor:
def execute(self, plan):
def generate(self, expression):
Convert a SQL expression into literal Python code and compile it into bytecode.
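A rough sketch of what this produces (the expression and the rendered string below are illustrative; the exact output can vary between sqlglot versions):

from sqlglot import parse_one
from sqlglot.executor.python import PythonExecutor

executor = PythonExecutor()
code = executor.generate(parse_one("x IS NULL"))
# The Python dialect renders the expression roughly as: scope[None]["x"] is None
# compile() then turns that source into a code object that Context.eval runs
# against the executor's env, with `scope` supplying the current row.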
def generate_tuple(self, expressions):
Convert an array of SQL expressions into a tuple of compiled Python bytecode.
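For example (a sketch; the expressions are illustrative):

from sqlglot import parse_one
from sqlglot.executor.python import PythonExecutor

executor = PythonExecutor()
codes = executor.generate_tuple([parse_one("a"), parse_one("b + 1")])
# One compiled code object per expression; an empty input yields ().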
def scan(self, step, context):
def scan_csv(self, step):
def join(self, step, context):
def hash_join(self, join, source_context, join_context):
def aggregate(self, step, context):
def sort(self, step, context):
def set_operation(self, step, context):
class Python(Dialect):
class Generator(generator.Generator):
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
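As a sketch of how the executor uses this generator (see PythonExecutor.__init__ above), an expression is rendered as Python source rather than SQL; the output shown is approximate:

from sqlglot import parse_one
from sqlglot.executor.python import Python

gen = Python().generator(identify=True, comments=False)
print(gen.generate(parse_one("x IS NULL AND y < 5")))
# roughly: scope[None]["x"] is None and LT(scope[None]["y"], 5)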
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
@classmethod
def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
    """Checks if text can be identified given an identify option.

    Args:
        text: The text to check.
        identify:
            "always" or `True`: Always returns true.
            "safe": True if the identifier is case-insensitive.

    Returns:
        Whether or not the given text can be identified.
    """
    if identify is True or identify == "always":
        return True

    if identify == "safe":
        return not cls.case_sensitive(text)

    return False
Checks if text can be identified given an identify option.
Arguments:
- text: The text to check.
- identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:
Whether or not the given text can be identified.
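A small usage sketch, based on the source above (the "safe" result depends on the dialect's case-sensitivity rules):

from sqlglot.generator import Generator

Generator.can_identify("foo", "always")  # True
Generator.can_identify("foo", True)      # True, same as "always"
Generator.can_identify("foo", False)     # False: quoting is never forced
# With identify="safe", the result is `not Generator.case_sensitive("foo")`.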